// -----// IR Dump After AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After AutoInputConversionPipeline (iree-auto-input-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After IREEImportPublic (iree-import-public) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After ImportMLProgram (iree-import-ml-program) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After SanitizeModuleNames (iree-sanitize-module-names) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After ConvertMeshToFlow (iree-convert-mesh-to-flow) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = util.call @_turbine_llm_mmtfp_3d_8640_3200_f32f16(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x?x8640xf32>
    %c0 = arith.constant 0 : index
    %dim = tensor.dim %4, %c0 : tensor<?x?x8640xf32>
    %c1 = arith.constant 1 : index
    %dim_0 = tensor.dim %4, %c1 : tensor<?x?x8640xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<?x?x8640xf32>{%dim, %dim_0} -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
  util.func private @_turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
    %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
    %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
    %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    util.return %4 : tensor<?x?x8640xf32>
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: tensor<?x?x3200xf32>, %arg1: tensor<8640x3200xf16>) -> tensor<?x?x8640xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %dim = tensor.dim %arg0, %c0 : tensor<?x?x3200xf32>
  %dim_0 = tensor.dim %arg0, %c1 : tensor<?x?x3200xf32>
  %0 = tensor.empty(%dim) : tensor<?x8640x3200xf16>
  %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<8640x3200xf16>) outs(%0 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %2 = tensor.empty(%dim, %dim_0) : tensor<?x?x8640xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %4 = linalg.batch_matmul_transpose_b ins(%arg0, %1 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%3 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  util.return %4 : tensor<?x?x8640xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %c1 = arith.constant 1 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = util.call @_turbine_llm_mmtfp_3d_8640_3200_f32f16(%2, %3) : (tensor<?x?x3200xf32>, tensor<8640x3200xf16>) -> tensor<?x?x8640xf32>
  %dim = tensor.dim %4, %c0 : tensor<?x?x8640xf32>
  %dim_0 = tensor.dim %4, %c1 : tensor<?x?x8640xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<?x?x8640xf32>{%dim, %dim_0} -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After DemoteF64ToF32 (iree-util-demote-f64-to-f32) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After RemoveZeroExtentTensors (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOps (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmul (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperands (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ExpandTensorShapes (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcat (iree-global-opt-decompose-concat) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDims (iree-flow-fold-unit-extent-dims) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32>
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After FuseDequantizationMatmul (iree-global-opt-fuse-dequantization-matmul) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%8 = linalg.batch_matmul_transpose_b ins(%2, %5 : tensor<?x?x3200xf32>, tensor<?x8640x3200xf16>) outs(%7 : tensor<?x?x8640xf32>) -> tensor<?x?x8640xf32> | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SetEncoding (iree-global-opt-set-encoding) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)> | |
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)> | |
#map4 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> | |
#map5 = affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)> | |
#map6 = affine_map<()[s0, s1, s2] -> (-s1 + s2 + (s1 ceildiv s0) * s0)> | |
#map7 = affine_map<()[s0] -> ((8640 ceildiv s0) * s0)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f16 | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%c1 = arith.constant 1 : index | |
%c0 = arith.constant 0 : index | |
%cst_0 = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6:3 = iree_linalg_ext.upper_bound_tile_size tensor<?x?x3200xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f16, f32], user_indexing_maps = [#map2, #map3, #map4]>> -> index, index, index | |
%dim = tensor.dim %2, %c0 : tensor<?x?x3200xf32> | |
%7 = affine.apply #map5()[%6#0, %dim] | |
%dim_1 = tensor.dim %2, %c1 : tensor<?x?x3200xf32> | |
%8 = affine.apply #map5()[%6#1, %dim_1] | |
%9 = affine.apply #map5()[%6#2, %c3200] | |
%padded = tensor.pad %2 low[0, 0, 0] high[%7, %8, %9] { | |
^bb0(%arg2: index, %arg3: index, %arg4: index): | |
tensor.yield %cst_0 : f32 | |
} : tensor<?x?x3200xf32> to tensor<?x?x?xf32> | |
%10 = iree_linalg_ext.set_encoding %padded : tensor<?x?x?xf32> -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map2, #map3, #map4]>> | |
%11:3 = iree_linalg_ext.upper_bound_tile_size tensor<?x8640x3200xf16, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f16, f32], user_indexing_maps = [#map2, #map3, #map4]>> -> index, index, index | |
%12 = affine.apply #map5()[%11#0, %0] | |
%13 = affine.apply #map5()[%11#1, %c8640] | |
%14 = affine.apply #map5()[%11#2, %c3200] | |
%padded_2 = tensor.pad %5 low[0, 0, 0] high[%12, %13, %14] { | |
^bb0(%arg2: index, %arg3: index, %arg4: index): | |
tensor.yield %cst : f16 | |
} : tensor<?x8640x3200xf16> to tensor<?x?x?xf16> | |
%15 = iree_linalg_ext.set_encoding %padded_2 : tensor<?x?x?xf16> -> tensor<?x?x?xf16, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map2, #map3, #map4]>> | |
%16:3 = iree_linalg_ext.upper_bound_tile_size tensor<?x?x8640xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], user_indexing_maps = [#map2, #map3, #map4]>> -> index, index, index | |
%17 = affine.apply #map6()[%16#0, %0, %0] | |
%18 = affine.apply #map6()[%16#1, %1, %1] | |
%19 = affine.apply #map7()[%16#2] | |
%20 = tensor.empty(%17, %18, %19) : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>> | |
%21 = linalg.fill ins(%cst_0 : f32) outs(%20 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>>) -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>> | |
%22 = linalg.batch_matmul_transpose_b ins(%10, %15 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [#map2, #map3, #map4]>>, tensor<?x?x?xf16, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [#map2, #map3, #map4]>>) outs(%21 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>>) -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>> | |
%23 = iree_linalg_ext.unset_encoding %22 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [#map2, #map3, #map4]>> -> tensor<?x?x?xf32> | |
%extracted_slice = tensor.extract_slice %23[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x?xf32> to tensor<?x?x8640xf32> | |
%24 = hal.tensor.export %extracted_slice "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %24 : !hal.buffer_view | |
} | |
} | |
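// Note: SetEncoding pads each matmul operand up to a (still symbolic) tile-size
// multiple and tags it with its role (LHS / RHS / RESULT). The map
// #map5 = affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)> is exactly the
// "round s1 up to a multiple of s0 and return the slack" computation. A small
// Python sketch of that arithmetic (the helper name is ours):
def pad_amount(extent, tile):
    # -(x // -t) is ceildiv(x, t) in Python.
    return -(extent // -tile) * tile - extent

assert pad_amount(100, 16) == 12  # a 100-row extent is padded up to 112
assert pad_amount(7, 1) == 0      # a tile size of 1 never pads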
// -----// IR Dump After CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c8640 = arith.constant 8640 : index | |
%c16 = arith.constant 16 : index | |
%cst = arith.constant 0.000000e+00 : f16 | |
%c1 = arith.constant 1 : index | |
%c0 = arith.constant 0 : index | |
%cst_0 = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%dim = tensor.dim %2, %c0 : tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%c1, %dim] | |
%dim_1 = tensor.dim %2, %c1 : tensor<?x?x3200xf32> | |
%7 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%c16, %dim_1] | |
%padded = tensor.pad %2 low[0, 0, 0] high[%6, %7, %c0] { | |
^bb0(%arg2: index, %arg3: index, %arg4: index): | |
tensor.yield %cst_0 : f32 | |
} : tensor<?x?x3200xf32> to tensor<?x?x?xf32> | |
%8 = iree_linalg_ext.set_encoding %padded : tensor<?x?x?xf32> -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> | |
%9 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%c1, %0] | |
%padded_2 = tensor.pad %5 low[0, 0, 0] high[%9, %c0, %c0] { | |
^bb0(%arg2: index, %arg3: index, %arg4: index): | |
tensor.yield %cst : f16 | |
} : tensor<?x8640x3200xf16> to tensor<?x?x?xf16> | |
%10 = iree_linalg_ext.set_encoding %padded_2 : tensor<?x?x?xf16> -> tensor<?x?x?xf16, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> | |
%11 = affine.apply affine_map<()[s0, s1, s2] -> (-s1 + s2 + (s1 ceildiv s0) * s0)>()[%c1, %0, %0] | |
%12 = affine.apply affine_map<()[s0, s1, s2] -> (-s1 + s2 + (s1 ceildiv s0) * s0)>()[%c16, %1, %1] | |
%13 = tensor.empty(%11, %12, %c8640) : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> | |
%14 = linalg.fill ins(%cst_0 : f32) outs(%13 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>>) -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> | |
%15 = linalg.batch_matmul_transpose_b ins(%8, %10 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f16, f32], original_type = tensor<?x?x3200xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>>, tensor<?x?x?xf16, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f16, f32], original_type = tensor<?x8640x3200xf16>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>>) outs(%14 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>>) -> tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> | |
%16 = iree_linalg_ext.unset_encoding %15 : tensor<?x?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f16, f32], original_type = tensor<?x?x8640xf32>, user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]>> -> tensor<?x?x?xf32> | |
%extracted_slice = tensor.extract_slice %16[0, 0, 0] [%0, %1, 8640] [1, 1, 1] : tensor<?x?x?xf32> to tensor<?x?x8640xf32> | |
%17 = hal.tensor.export %extracted_slice "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %17 : !hal.buffer_view | |
} | |
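// Note: CPUMaterializeUpperBoundTileSize resolves the symbolic tile sizes for the
// znver4 target: 16 along M and N, 1 along the batch and K dimensions. Only the
// dynamic M extent of the LHS can therefore need padding; the static RHS extents
// already tile evenly, which is why its tensor.pad above contributes nothing.
// A one-line check of that claim:
assert -(8640 // -16) * 16 == 8640 and -(3200 // -1) * 1 == 3200  # zero slack on the RHS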
// -----// IR Dump After CPUMaterializeEncoding (iree-codegen-cpu-materialize-encoding) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f16 | |
%c1 = arith.constant 1 : index | |
%c0 = arith.constant 0 : index | |
%cst_0 = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%dim = tensor.dim %2, %c0 : tensor<?x?x3200xf32> | |
%dim_1 = tensor.dim %2, %c1 : tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%dim_1] | |
%7 = tensor.empty(%dim, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst_0 : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_2 = tensor.pack %5 padding_value(%cst : f16) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = affine.apply affine_map<()[s0, s1, s2] -> (-s1 + s2 + (s1 ceildiv s0) * s0)>()[%c1, %0, %0] | |
%10 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%11 = tensor.empty(%9, %10) : tensor<?x?x540x16x16xf32> | |
%12 = linalg.fill ins(%cst_0 : f32) outs(%11 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%13 = linalg.batch_mmt4d ins(%pack, %pack_2 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%12 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%14 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %14 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%15 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
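// Note: CPUMaterializeEncoding lowers the encodings to concrete data-tiling ops:
// tensor.pack tiles M by 16 and K by 1 (so the 8640 weight rows become 540 tiles
// of 16), the contraction becomes linalg.batch_mmt4d over the tiled layout, and
// tensor.unpack restores the row-major <?x?x8640> result. A NumPy model of the
// two key ops (function names are ours; an illustrative sketch under those
// assumptions, not IREE's implementation):
import numpy as np

def pack_m16_k1(t, tile_m=16):
    # tensor.pack with inner_dims_pos = [1, 2], inner_tiles = [16, 1]:
    # (B, M, K) -> (B, ceil(M/16), K, 16, 1), zero-padding M as needed.
    b, m, k = t.shape
    m1 = -(m // -tile_m)
    padded = np.zeros((b, m1 * tile_m, k), t.dtype)
    padded[:, :m, :] = t
    return padded.reshape(b, m1, tile_m, k).transpose(0, 1, 3, 2)[..., None]

def batch_mmt4d(lhs, rhs):
    # lhs: (B, M1, K1, M0, K0), rhs: (B, N1, K1, N0, K0) -> (B, M1, N1, M0, N0):
    # out[b, i, j, ti, tj] = sum over k, tk of lhs[b, i, k, ti, tk] * rhs[b, j, k, tj, tk]
    return np.einsum("bikxz,bjkyz->bijxy", lhs, rhs.astype(lhs.dtype))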
// -----// IR Dump After MaterializeHomogeneousEncodings (iree-global-opt-materialize-homogeneous-encodings) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map3 = affine_map<()[s0, s1, s2] -> (-s1 + s2 + (s1 ceildiv s0) * s0)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f16 | |
%c1 = arith.constant 1 : index | |
%c0 = arith.constant 0 : index | |
%cst_0 = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%dim = tensor.dim %2, %c0 : tensor<?x?x3200xf32> | |
%dim_1 = tensor.dim %2, %c1 : tensor<?x?x3200xf32> | |
%6 = affine.apply #map2()[%dim_1] | |
%7 = tensor.empty(%dim, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst_0 : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_2 = tensor.pack %5 padding_value(%cst : f16) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = affine.apply #map3()[%c1, %0, %0] | |
%10 = affine.apply #map2()[%1] | |
%11 = tensor.empty(%9, %10) : tensor<?x?x540x16x16xf32> | |
%12 = linalg.fill ins(%cst_0 : f32) outs(%11 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%13 = linalg.batch_mmt4d ins(%pack, %pack_2 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%12 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%14 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %14 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%15 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply #map2()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = affine.apply #map2()[%1] | |
%10 = tensor.empty(%0, %9) : tensor<?x?x540x16x16xf32> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%11 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%13 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %13 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%14 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %14 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply #map2()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
} | |
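// Note: canonicalization plus CSE tidy the materialized form: the weight pack
// loses its padding_value because its static extents tile exactly, and the
// duplicated ceildiv-by-16 of %1 folds into the single value %6.
assert 8640 == 540 * 16  # the ?x540x3200x16x1 pack covers the weights with no padding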
// -----// IR Dump After SimplifyPackUnpack (iree-global-opt-simplify-pack-unpack) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply #map2()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
// -----// IR Dump After GlobalLoopInvariantCodeMotion (iree-global-opt-loop-invariant-code-motion) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
%8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
%9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
%13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %13 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After JitGlobals (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu], iree.fixedpoint.iteration = 0 : index} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
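
Annotation (not part of the compiler output): note that the fixed-point iteration has converged here, so the iree.fixedpoint.iteration module attribute is gone and the IR is unchanged. As a reference while reading the remaining dumps, a NumPy sketch of what linalg.batch_mmt4d computes on these packed operands, assuming the mmt4d operand order LHS [B, M1, K1, M0, K0], RHS [B, N1, K1, N0, K0], accumulator [B, M1, N1, M0, N0]:

import numpy as np

def batch_mmt4d_ref(lhs, rhs, acc):
    # Per (batch, M1, N1) tile pair: acc[16x16] += lhs_tile[16x1] @ rhs_tile[16x1]^T,
    # summed over the K1 tiles; the f16 RHS is widened to f32 before accumulating,
    # matching the f32f16 mixed-precision op in the dump.
    return acc + np.einsum('bmkij,bnkpj->bmnip', lhs, rhs.astype(np.float32))

# Smoke test with small stand-in sizes (the real op has K1 = 3200, N1 = 540).
lhs = np.random.rand(1, 2, 4, 16, 1).astype(np.float32)
rhs = np.random.rand(1, 3, 4, 16, 1).astype(np.float16)
acc = np.zeros((1, 2, 3, 16, 16), dtype=np.float32)
assert batch_mmt4d_ref(lhs, rhs, acc).shape == (1, 2, 3, 16, 16)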
// -----// IR Dump After RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegality (iree-verify-input-legality) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After InjectTensorTracing (iree-flow-inject-tensor-tracing) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlice (iree-flow-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2) -> (d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map2 = affine_map<()[s0] -> (s0 ceildiv 16)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %6 = affine.apply #map2()[%1]
    %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
    %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %13 : !hal.buffer_view
  }
}
// -----// IR Dump After InterchangeGenericOps (iree-flow-interchange-generic-ops) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
  ^bb0(%in: f16, %out: f16):
    linalg.yield %in : f16
  } -> tensor<?x8640x3200xf16>
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = tensor.empty(%0, %6) : tensor<?x?x3200x16x1xf32>
  %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %pack_0 = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
  %9 = tensor.empty(%0, %6) : tensor<?x?x540x16x16xf32>
  %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %11 = linalg.batch_mmt4d ins(%pack, %pack_0 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %unpack = tensor.unpack %11 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %12 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
  %13 = hal.tensor.export %unpack "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %13 : !hal.buffer_view
}
// -----// IR Dump After FusionOfTensorOps (iree-flow-fusion-of-tensor-ops) //----- //
// (function body identical to the preceding dump)
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// (function body identical to the preceding dump)
// -----// IR Dump After CSE (cse) //----- //
// (function body identical to the preceding dump)
// -----// IR Dump After SplitReduction (iree-flow-split-reduction-ops) //----- //
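// split-reduction rewrites large linalg reductions into partial reductions to
// expose more parallelism; it leaves this batch_mmt4d untouched, so the dump is
// unchanged.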
// (function body identical to the preceding dump)
// -----// IR Dump After InterchangeGenericOps (iree-flow-interchange-generic-ops) //----- //
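// interchange-generic-ops permutes linalg.generic iteration order (typically to
// sink reductions innermost); the only generic here is an all-parallel
// broadcast, so there is nothing to interchange.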
// (function body identical to the preceding dump)
// -----// IR Dump After FormScalarDispatches (iree-flow-form-scalar-dispatches) //----- //
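// form-scalar-dispatches groups purely scalar computation into dedicated
// dispatches; this function has none.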
// (function body identical to the preceding dump)
// -----// IR Dump After FormDispatchRegions (iree-flow-form-dispatch-regions) //----- //
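// First structural change in the dispatch pipeline: each root op (the padded
// LHS pack, the broadcast + RHS pack, the batch_mmt4d, and the final unpack) is
// wrapped in its own flow.dispatch.region whose results carry their dynamic
// dims, e.g. {%0, %5}.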
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %6 = tensor.empty(%0, %5) : tensor<?x?x3200x16x1xf32>
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %7 = flow.dispatch.region -> (tensor<?x?x3200x16x1xf32>{%0, %5}) {
    %pack = tensor.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %6 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    flow.return %pack : tensor<?x?x3200x16x1xf32>
  }
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %c0_0 = arith.constant 0 : index
  %9 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
    %16 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %16 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %8 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.return %pack : tensor<?x540x3200x16x1xf16>
  }
  %10 = tensor.empty(%0, %5) : tensor<?x?x540x16x16xf32>
  %11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = flow.dispatch.region -> (tensor<?x?x540x16x16xf32>{%0, %5}) {
    %16 = linalg.batch_mmt4d ins(%7, %9 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%11 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    flow.return %16 : tensor<?x?x540x16x16xf32>
  }
  %13 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %c0_1 = arith.constant 0 : index
  %c1_2 = arith.constant 1 : index
  %14 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %13 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    flow.return %unpack : tensor<?x?x8640xf32>
  }
  %15 = hal.tensor.export %14 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %15 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegions (iree-flow-clone-producers-into-dispatch-regions) //----- //
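// Cheap producers (zero constants, affine.apply, tensor.empty) are cloned into
// each flow.dispatch.region so the regions are self-contained and fewer SSA
// values cross the dispatch boundary.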
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
  %5 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %6 = tensor.empty(%0, %5) : tensor<?x?x3200x16x1xf32>
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %7 = flow.dispatch.region -> (tensor<?x?x3200x16x1xf32>{%0, %5}) {
    %cst_3 = arith.constant 0.000000e+00 : f32
    %16 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
    %17 = tensor.empty(%0, %16) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %2 padding_value(%cst_3 : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %17 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    flow.return %pack : tensor<?x?x3200x16x1xf32>
  }
  %8 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
  %c0_0 = arith.constant 0 : index
  %9 = flow.dispatch.region -> (tensor<?x540x3200x16x1xf16>{%0}) {
    %16 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
    %17 = tensor.empty(%0) : tensor<?x8640x3200xf16>
    %18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%17 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %18 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %16 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.return %pack : tensor<?x540x3200x16x1xf16>
  }
  %10 = tensor.empty(%0, %5) : tensor<?x?x540x16x16xf32>
  %11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
  %12 = flow.dispatch.region -> (tensor<?x?x540x16x16xf32>{%0, %5}) {
    %16 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
    %cst_3 = arith.constant 0.000000e+00 : f32
    %17 = tensor.empty(%0, %16) : tensor<?x?x540x16x16xf32>
    %18 = linalg.fill ins(%cst_3 : f32) outs(%17 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %19 = linalg.batch_mmt4d ins(%7, %9 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%18 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    flow.return %19 : tensor<?x?x540x16x16xf32>
  }
  %13 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
  %c0_1 = arith.constant 0 : index
  %c1_2 = arith.constant 1 : index
  %14 = flow.dispatch.region -> (tensor<?x?x8640xf32>{%0, %1}) {
    %16 = tensor.empty(%0, %1) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %16 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    flow.return %unpack : tensor<?x?x8640xf32>
  }
  %15 = hal.tensor.export %14 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %15 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensions (iree-flow-collapse-dimensions) //----- //
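// collapse-dimensions folds adjacent parallel loop dimensions inside each
// region where the indexing allows it; the pack/mmt4d/unpack ops here expose no
// such ranges, so the body is unchanged.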
// (function body identical to the preceding dump)
// -----// IR Dump After FormDispatchWorkgroups (iree-flow-form-dispatch-workgroups) //----- //
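// Each flow.dispatch.region now becomes a flow.dispatch.workgroups op: tensor
// operands turn into an explicit !flow.dispatch.tensor ABI, dynamic dims are
// passed as ordinal-tagged index operands, and a trailing count region derives
// the workgroup count from the slice.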
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %5 = flow.dispatch.workgroups[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) {
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %12 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %cst = arith.constant 0.000000e+00 : f32
    %13 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [%11, %10, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} -> tensor<?x?x3200xf32>
    %14 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%10]
    %15 = tensor.empty(%11, %14) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %13 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %15 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    flow.dispatch.tensor.store %pack, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %12 = tensor.empty(%10) : tensor<?x540x3200x16x1xf16>
    %13 = tensor.empty(%10) : tensor<?x8640x3200xf16>
    %14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%11 : tensor<8640x3200xf16>) outs(%13 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %14 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %12 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %arg4, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %7 = flow.dispatch.workgroups[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} =
      (%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) {
    %10 = flow.dispatch.workload.ordinal %arg2, 0 : index
    %11 = flow.dispatch.workload.ordinal %arg5, 1 : index
    %12 = flow.dispatch.workload.ordinal %arg6, 2 : index
    %cst = arith.constant 0.000000e+00 : f32
    %13 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%11, %12} -> tensor<?x?x3200x16x1xf32>
    %14 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [%11, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%11} -> tensor<?x540x3200x16x1xf16>
    %15 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%10]
    %16 = tensor.empty(%11, %15) : tensor<?x?x540x16x16xf32>
    %17 = linalg.fill ins(%cst : f32) outs(%16 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %18 = linalg.batch_mmt4d ins(%13, %14 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%17 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    flow.dispatch.tensor.store %18, %arg7, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %8 = flow.dispatch.workgroups[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %12 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %13 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0, 0, 0], sizes = [%11, %10, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} -> tensor<?x?x540x16x16xf32>
    %14 = tensor.empty(%11, %12) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %13 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %14 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %unpack, %arg6, offsets = [0, 0, 0], sizes = [%11, %12, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CaptureDynamicDims (iree-flow-capture-dynamic-dims) //----- //
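// capture-dynamic-dims inserts flow.dispatch.tie_shape ops so every dynamically
// shaped !flow.dispatch.tensor argument is tied to the captured index operands
// that carry its runtime dims.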
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %5 = flow.dispatch.workgroups[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) {
    %10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%arg4, %arg3}
    %11 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%arg4, %arg5}
    %12 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %13 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %14 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %cst = arith.constant 0.000000e+00 : f32
    %15 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%13, %12, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%13, %12} -> tensor<?x?x3200xf32>
    %16 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%12]
    %17 = tensor.empty(%13, %16) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %15 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %17 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    flow.dispatch.tensor.store %pack, %11, offsets = [0, 0, 0, 0, 0], sizes = [%13, %14, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%13, %14}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %10 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%arg3}
    %11 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %12 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %13 = tensor.empty(%11) : tensor<?x540x3200x16x1xf16>
    %14 = tensor.empty(%11) : tensor<?x8640x3200xf16>
    %15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%12 : tensor<8640x3200xf16>) outs(%14 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %13 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %10, offsets = [0, 0, 0, 0, 0], sizes = [%11, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%11}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %7 = flow.dispatch.workgroups[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} =
      (%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) {
    %10 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%arg5, %arg6}
    %11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%arg5}
    %12 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%arg5, %arg6}
    %13 = flow.dispatch.workload.ordinal %arg2, 0 : index
    %14 = flow.dispatch.workload.ordinal %arg5, 1 : index
    %15 = flow.dispatch.workload.ordinal %arg6, 2 : index
    %cst = arith.constant 0.000000e+00 : f32
    %16 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0, 0, 0], sizes = [%14, %15, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%14, %15} -> tensor<?x?x3200x16x1xf32>
    %17 = flow.dispatch.tensor.load %11, offsets = [0, 0, 0, 0, 0], sizes = [%14, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%14} -> tensor<?x540x3200x16x1xf16>
    %18 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%13]
    %19 = tensor.empty(%14, %18) : tensor<?x?x540x16x16xf32>
    %20 = linalg.fill ins(%cst : f32) outs(%19 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %21 = linalg.batch_mmt4d ins(%16, %17 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%20 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    flow.dispatch.tensor.store %21, %12, offsets = [0, 0, 0, 0, 0], sizes = [%14, %15, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%14, %15}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %8 = flow.dispatch.workgroups[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%arg4, %arg3}
    %11 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%arg4, %arg5}
    %12 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %13 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %14 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %15 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0, 0, 0], sizes = [%13, %12, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%13, %12} -> tensor<?x?x540x16x16xf32>
    %16 = tensor.empty(%13, %14) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %16 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %unpack, %11, offsets = [0, 0, 0], sizes = [%13, %14, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%13, %14}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %5 = flow.dispatch.workgroups[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %12 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10}
    %14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12}
    %15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0], sizes = [%11, %10, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} -> tensor<?x?x3200xf32>
    %16 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%10]
    %17 = tensor.empty(%11, %16) : tensor<?x?x3200x16x1xf32>
    %pack = tensor.pack %15 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %17 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
    flow.dispatch.tensor.store %pack, %14, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %6 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10}
    %12 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
    %13 = tensor.empty(%10) : tensor<?x540x3200x16x1xf16>
    %14 = tensor.empty(%10) : tensor<?x8640x3200xf16>
    %15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%12 : tensor<8640x3200xf16>) outs(%14 : tensor<?x8640x3200xf16>) {
    ^bb0(%in: f16, %out: f16):
      linalg.yield %in : f16
    } -> tensor<?x8640x3200xf16>
    %pack = tensor.pack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %13 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
    flow.dispatch.tensor.store %pack, %11, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10}
    flow.return
  } count(%arg2: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2
    flow.return %x, %y, %z : index, index, index
  }
  %7 = flow.dispatch.workgroups[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} =
      (%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %10 = flow.dispatch.workload.ordinal %arg5, 1 : index
    %11 = flow.dispatch.workload.ordinal %arg6, 2 : index
    %12 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11}
    %13 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10}
    %14 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11}
    %15 = flow.dispatch.workload.ordinal %arg2, 0 : index
    %16 = flow.dispatch.tensor.load %12, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11} -> tensor<?x?x3200x16x1xf32>
    %17 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10} -> tensor<?x540x3200x16x1xf16>
    %18 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%15]
    %19 = tensor.empty(%10, %18) : tensor<?x?x540x16x16xf32>
    %20 = linalg.fill ins(%cst : f32) outs(%19 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    %21 = linalg.batch_mmt4d ins(%16, %17 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%20 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
    flow.dispatch.tensor.store %21, %14, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %8 = flow.dispatch.workgroups[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
    %10 = flow.dispatch.workload.ordinal %arg3, 0 : index
    %11 = flow.dispatch.workload.ordinal %arg4, 1 : index
    %12 = flow.dispatch.workload.ordinal %arg5, 2 : index
    %13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10}
    %14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12}
    %15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%11, %10, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} -> tensor<?x?x540x16x16xf32>
    %16 = tensor.empty(%11, %12) : tensor<?x?x8640xf32>
    %unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %16 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
    flow.dispatch.tensor.store %unpack, %14, offsets = [0, 0, 0], sizes = [%11, %12, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12}
    flow.return
  } count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4
    flow.return %x, %y, %z : index, index, index
  }
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
// (function body identical to the preceding dump)
// -----// IR Dump After InitializeEmptyTensors (iree-flow-initialize-empty-tensors) //----- // | |
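// (annotation) InitializeEmptyTensors is meant to materialize tensor.empty values that would otherwise escape a dispatch back to the host; in this module every tensor.empty is already a pure destination for a fill, pack, or unpack inside a dispatch region, so the dump below appears identical to the previous one. | |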
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch.workgroups[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%12 = flow.dispatch.workload.ordinal %arg5, 2 : index | |
%13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} | |
%14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12} | |
%15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0], sizes = [%11, %10, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} -> tensor<?x?x3200xf32> | |
%16 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%10] | |
%17 = tensor.empty(%11, %16) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %15 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %17 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %14, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10} | |
%12 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%13 = tensor.empty(%10) : tensor<?x540x3200x16x1xf16> | |
%14 = tensor.empty(%10) : tensor<?x8640x3200xf16> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%12 : tensor<8640x3200xf16>) outs(%14 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %13 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %11, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10} | |
flow.return | |
} count(%arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} = | |
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = flow.dispatch.workload.ordinal %arg5, 1 : index | |
%11 = flow.dispatch.workload.ordinal %arg6, 2 : index | |
%12 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11} | |
%13 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10} | |
%14 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11} | |
%15 = flow.dispatch.workload.ordinal %arg2, 0 : index | |
%16 = flow.dispatch.tensor.load %12, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11} -> tensor<?x?x3200x16x1xf32> | |
%17 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10} -> tensor<?x540x3200x16x1xf16> | |
%18 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%15] | |
%19 = tensor.empty(%10, %18) : tensor<?x?x540x16x16xf32> | |
%20 = linalg.fill ins(%cst : f32) outs(%19 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%21 = linalg.batch_mmt4d ins(%16, %17 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%20 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %21, %14, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%12 = flow.dispatch.workload.ordinal %arg5, 2 : index | |
%13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} | |
%14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12} | |
%15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%11, %10, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} -> tensor<?x?x540x16x16xf32> | |
%16 = tensor.empty(%11, %12) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %16 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %14, offsets = [0, 0, 0], sizes = [%11, %12, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After OutlineDispatchExterns (iree-flow-outline-dispatch-externs) //----- // | |
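// (annotation) OutlineDispatchExterns only acts on hal.dispatch.extern ops, and none are present here. The visible difference is cosmetic: with the dump now printed at module scope, the affine maps are hoisted into #map/#map1/#map2 aliases instead of being spelled inline. | |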
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch.workgroups[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%12 = flow.dispatch.workload.ordinal %arg5, 2 : index | |
%13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} | |
%14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12} | |
%15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0], sizes = [%11, %10, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%11, %10} -> tensor<?x?x3200xf32> | |
%16 = affine.apply #map()[%10] | |
%17 = tensor.empty(%11, %16) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %15 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %17 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %14, offsets = [0, 0, 0, 0, 0], sizes = [%11, %12, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%11, %12} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%6 = flow.dispatch.workgroups[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10} | |
%12 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%13 = tensor.empty(%10) : tensor<?x540x3200x16x1xf16> | |
%14 = tensor.empty(%10) : tensor<?x8640x3200xf16> | |
%15 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%12 : tensor<8640x3200xf16>) outs(%14 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %13 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %11, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%10} | |
flow.return | |
} count(%arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%7 = flow.dispatch.workgroups[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} = | |
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg5: index, %arg6: index, %arg7: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%10 = flow.dispatch.workload.ordinal %arg5, 1 : index | |
%11 = flow.dispatch.workload.ordinal %arg6, 2 : index | |
%12 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11} | |
%13 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10} | |
%14 = flow.dispatch.tie_shape %arg7 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11} | |
%15 = flow.dispatch.workload.ordinal %arg2, 0 : index | |
%16 = flow.dispatch.tensor.load %12, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%10, %11} -> tensor<?x?x3200x16x1xf32> | |
%17 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%10, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%10} -> tensor<?x540x3200x16x1xf16> | |
%18 = affine.apply #map()[%15] | |
%19 = tensor.empty(%10, %18) : tensor<?x?x540x16x16xf32> | |
%20 = linalg.fill ins(%cst : f32) outs(%19 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%21 = linalg.batch_mmt4d ins(%16, %17 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%20 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %21, %14, offsets = [0, 0, 0, 0, 0], sizes = [%10, %11, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%10, %11} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%8 = flow.dispatch.workgroups[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} = | |
(%arg2: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg3: index, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%10 = flow.dispatch.workload.ordinal %arg3, 0 : index | |
%11 = flow.dispatch.workload.ordinal %arg4, 1 : index | |
%12 = flow.dispatch.workload.ordinal %arg5, 2 : index | |
%13 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} | |
%14 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12} | |
%15 = flow.dispatch.tensor.load %13, offsets = [0, 0, 0, 0, 0], sizes = [%11, %10, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%11, %10} -> tensor<?x?x540x16x16xf32> | |
%16 = tensor.empty(%11, %12) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %16 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %14, offsets = [0, 0, 0], sizes = [%11, %12, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%11, %12} | |
flow.return | |
} count(%arg2: index, %arg3: index, %arg4: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3, %arg4 | |
flow.return %x, %y, %z : index, index, index | |
} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After OutlineDispatchRegions (iree-flow-outline-dispatch-regions) //----- // | |
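// (annotation) This is the structural step of the Flow phase: each inline flow.dispatch.workgroups region above is outlined into a private flow.executable whose public export carries the workgroup-count function, and the call sites in @turbine_llm_mmtfp_3d_8640_3200_f32f16 are rewritten to flow.dispatch ops referencing those exports. | |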
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After AnnotateDispatches (iree-flow-annotate-dispatches) //----- // | |
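// (annotation) AnnotateDispatches renames each export with a summary of its payload so later profiles and traces are readable: dispatch_0 becomes *_pack_f32, dispatch_1 becomes *_generic_Dx8640x3200_f16_pack, dispatch_2 becomes *_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 (D marking a dynamic dim), and dispatch_3 becomes *_unpack_f32. | |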
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
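// (annotation) StripDebugOps runs once per executable and removes debug-only ops before translation; dispatch_3 contains none, so its body below is unchanged from the annotated dump above. | |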
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
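// (annotation) Same pass applied to dispatch_0; again there are no debug ops to remove, and the body is unchanged. | |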
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
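// (annotation) Same pass applied to dispatch_2; body unchanged. | |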
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
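// (annotation) Canonicalization of the public function finds nothing to fold: the op sequence below matches the previous dump. Because this dump is scoped to the function rather than the module, the ceildiv-by-16 map is printed inline instead of via the #map alias. | |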
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- // | |
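// (annotation) Same pass applied to dispatch_1, the last of the four executables; body unchanged. | |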
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After DeduplicateExecutables (iree-flow-deduplicate-executables) //----- // | |
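// (annotation) DeduplicateExecutables merges executables with identical bodies. The four dispatches here (pack, broadcast+pack, batch_mmt4d, unpack) are all distinct, so nothing is merged and the module below still carries all four. | |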
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
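// NOTE: dispatch_2 is the compute kernel. linalg.batch_mmt4d contracts both packed | |
// operands, roughly (k0 = 1 here, so the innermost k loop is trivial): | |
//   C[b][m][n][m0][n0] += A[b][m][k][m0][k0] * B[b][n][k][n0][k0] | |
// accumulating f32 * f16 products into the f32 tiles that linalg.fill zero-initializes; | |
// the _f32xf16xf32 suffix of the export name records this mixed-precision contraction. | |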
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
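// NOTE: dispatch_3 undoes the layout change on the result. The tensor.unpack with | |
// inner_tiles = [16, 16] computes, roughly: | |
//   out[b][m][n] = packed[b][m floordiv 16][n floordiv 16][m mod 16][n mod 16] | |
// writing only the %2 valid rows of the M dimension, which discards the padding | |
// introduced by dispatch_0. | |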
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
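// NOTE: once the dispatch regions are outlined into flow.executables, the host | |
// function is a four-node dispatch graph: | |
//   %5 = dispatch_0: pack LHS,  tensor<?x?x3200xf32>  -> tensor<?x?x3200x16x1xf32> | |
//   %6 = dispatch_1: pack RHS,  tensor<8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
//   %7 = dispatch_2: batch_mmt4d(%5, %6)              -> tensor<?x?x540x16x16xf32> | |
//   %8 = dispatch_3: unpack %7                        -> tensor<?x?x8640xf32> | |
// The only host-side arithmetic is %4 = ceildiv(%1, 16), the M-dimension tile count, | |
// threaded as a workload operand into every dispatch that touches the tiled M axis. | |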
// -----// IR Dump After InjectTensorTracing (iree-flow-inject-tensor-tracing) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
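// NOTE: InjectTensorTracing changes nothing here; the function contains no | |
// flow.tensor.trace sites. The ceildiv map now prints inline only because these | |
// per-function dumps are emitted outside the module that defines the #map alias. | |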
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
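// NOTE: the four per-executable Canonicalizer runs above leave the dispatch bodies | |
// structurally unchanged; the only visible difference is textual (affine-map aliases | |
// now printed inline). | |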
// -----// IR Dump After CSE (cse) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
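// NOTE: CSE likewise finds nothing to merge. In dispatch_1, for instance, the two | |
// tensor.empty ops survive because their result types differ | |
// (tensor<?x540x3200x16x1xf16> vs. tensor<?x8640x3200xf16>), so they are not duplicates. | |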
// -----// IR Dump After CleanupTensorShapes (iree-flow-cleanup-tensor-shapes) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
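// NOTE: CleanupTensorShapes strips any tensor shape queries left at this level; none | |
// remain here, since every dynamic extent already travels as an explicit index value | |
// (%0, %1, %4) alongside the dispatched tensors. | |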
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
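// NOTE: the host function has reached a fixpoint: this CSE dump, the Canonicalizer | |
// dump above it, and the SimplifyGlobalAccesses dump further below all print the | |
// identical body. | |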
// -----// IR Dump After SymbolDCE (symbol-dce) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
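// NOTE: SymbolDCE retains all four flow.executables; each is referenced by a | |
// flow.dispatch in the public entry point, so nothing is dead at the symbol level. | |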
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16> | |
%4 = tensor.empty(%0) : tensor<?x8640x3200xf16> | |
%5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) { | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply #map()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
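// NOTE: the Stream pipeline starts here. VerifyInput only checks that the flow-level | |
// module is in a form the stream conversion accepts and, as expected, modifies nothing. | |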
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} | |
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16> | |
%4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4} | |
%6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0} | |
%7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4} | |
%8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1} | |
%9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4}
  %6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4}
  %8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1}
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 16)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.workload.ordinal %arg3, 2 : index
        %3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0}
        %4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        %5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32>
        %6 = affine.apply #map()[%0]
        %7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32>
        %pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
        flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        return
      }
    }
  }
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.workload.ordinal %arg3, 1 : index
        %1 = flow.dispatch.workload.ordinal %arg4, 2 : index
        %2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1}
        %3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0}
        %4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1}
        %5 = flow.dispatch.workload.ordinal %arg0, 0 : index
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32>
        %7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16>
        %8 = affine.apply #map()[%5]
        %9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32>
        %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
        %11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
        flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1}
        return
      }
    }
  }
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.workload.ordinal %arg3, 2 : index
        %3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0}
        %4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2}
        %5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32>
        %6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32>
        %unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
        flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2}
        return
      }
    }
  }
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = affine.apply #map()[%1]
    %5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4}
    %6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4}
    %8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1}
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
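For readers unfamiliar with tensor.pack, here is a minimal NumPy sketch (an illustration added alongside the log, not IREE code; the helper name pack_16x1 is invented) of what @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 computes: pad dim 1 of a BxMxK f32 tensor up to a multiple of 16, then retile it with inner_dims_pos = [1, 2] and inner_tiles = [16, 1], using padding value 0.0 as in the IR above.

import numpy as np

def pack_16x1(x, pad_value=0.0):
    b, m, k = x.shape
    m_up = -(-m // 16) * 16                   # ceildiv(m, 16) * 16
    x = np.pad(x, [(0, 0), (0, m_up - m), (0, 0)],
               constant_values=pad_value)     # padding_value(%cst : f32)
    # Split the tiled dims, then move the inner tiles innermost:
    # (b, m1, m0, k, k0) -> (b, m1, k, m0, k0).
    return x.reshape(b, m_up // 16, 16, k, 1).transpose(0, 1, 3, 2, 4)

x = np.arange(2 * 20 * 8, dtype=np.float32).reshape(2, 20, 8)
packed = pack_16x1(x)
assert packed.shape == (2, 2, 8, 16, 1)       # B x ceildiv(M,16) x K x 16 x 1
assert packed[0, 0, 3, 5, 0] == x[0, 5, 3]    # packed[b, m1, k, m0, 0] == x[b, 16*m1+m0, k]

The tensor.unpack in dispatch_3 is the inverse relayout (with inner_tiles = [16, 16] and no padding), dropping the padded rows on the way out.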
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// (module identical to the ApplyPatterns dump above)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (module identical to the ApplyPatterns dump above)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// (module identical to the ApplyPatterns dump above)
// -----// IR Dump After OutlineConstants (iree-util-outline-constants) //----- //
// (module identical to the ApplyPatterns dump above)
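The linalg.batch_mmt4d in dispatch_2 consumes those packed operands. A hypothetical NumPy reference for its semantics (a reader's annotation, not compiler output; tiny stand-in sizes replace the real N1 = 540, K1 = 3200 and the dynamic B and M1):

import numpy as np

def batch_mmt4d(lhs, rhs):
    # out[b, m1, n1, m0, n0] = sum over k1, k0 of
    #   lhs[b, m1, k1, m0, k0] * rhs[b, n1, k1, n0, k0],
    # with the f16 RHS promoted to f32 before accumulating.
    return np.einsum("bmkxz,bnkyz->bmnxy",
                     lhs.astype(np.float32), rhs.astype(np.float32))

B, M1, N1, K1 = 2, 3, 4, 5
lhs = np.random.rand(B, M1, K1, 16, 1).astype(np.float32)
rhs = np.random.rand(B, N1, K1, 16, 1).astype(np.float16)
out = batch_mmt4d(lhs, rhs)
assert out.shape == (B, M1, N1, 16, 16)

# Cross-check against the batch_matmul_transpose_b this pipeline replaces:
a = lhs.transpose(0, 1, 3, 2, 4).reshape(B, M1 * 16, K1)    # [B, M, K]
bt = rhs.transpose(0, 1, 3, 2, 4).reshape(B, N1 * 16, K1)   # [B, N, K]
ref = a @ bt.transpose(0, 2, 1).astype(np.float32)
np.testing.assert_allclose(
    out.transpose(0, 1, 3, 2, 4).reshape(B, M1 * 16, N1 * 16), ref, rtol=1e-5)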
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
  %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
  %4 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4}
  %6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
  %7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4}
  %8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1}
  %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
  util.return %9 : !hal.buffer_view
}
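// NOTE: canonicalization leaves this function semantically unchanged; the dump
// appears to be function-scoped, so the module-level #map alias is simply
// printed inline as affine_map<()[s0] -> (s0 ceildiv 16)> at its single use.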
// -----// IR Dump After CSE (cse) //----- //
// (function unchanged; identical to the IR dump after Canonicalizer above)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (function unchanged; identical to the IR dump after Canonicalizer above)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 16)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.workload.ordinal %arg3, 2 : index
        %3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0}
        %4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        %5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32>
        %6 = affine.apply #map()[%0]
        %7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32>
        %pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
        flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        return
      }
    }
  }
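  // NOTE: dispatch_0 packs the dynamic LHS with an explicit zero padding_value
  // so every M tile is full: packed[b, m_o, k, m_i, 0] = src[b, m_o * 16 + m_i, k],
  // with 0.0 wherever m_o * 16 + m_i >= M. A minimal static sketch of the same
  // op (a hypothetical standalone example, not taken from this pipeline):
  //
  //   func.func @pack_lhs_sketch(%src: tensor<100x3200xf32>) -> tensor<7x3200x16x1xf32> {
  //     %cst = arith.constant 0.000000e+00 : f32
  //     %init = tensor.empty() : tensor<7x3200x16x1xf32>
  //     %pack = tensor.pack %src padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [16, 1] into %init : tensor<100x3200xf32> -> tensor<7x3200x16x1xf32>
  //     return %pack : tensor<7x3200x16x1xf32>
  //   }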
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>, %arg1: index, %arg2: !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        %2 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %3 = tensor.empty(%0) : tensor<?x540x3200x16x1xf16>
        %4 = tensor.empty(%0) : tensor<?x8640x3200xf16>
        %5 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<8640x3200xf16>) outs(%4 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %3 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %1, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%0}
        return
      }
    }
  }
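  // NOTE: dispatch_1 materializes the f16 weights per batch: the linalg.generic
  // with indexing map (d0, d1, d2) -> (d1, d2) broadcasts the static 8640x3200
  // tensor across the dynamic batch dimension, and the following pack needs no
  // padding_value because 8640 = 540 * 16 divides evenly.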
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>, %arg3: index, %arg4: index, %arg5: !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.workload.ordinal %arg3, 1 : index
        %1 = flow.dispatch.workload.ordinal %arg4, 2 : index
        %2 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1}
        %3 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0}
        %4 = flow.dispatch.tie_shape %arg5 : !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1}
        %5 = flow.dispatch.workload.ordinal %arg0, 0 : index
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32>
        %7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16>
        %8 = affine.apply #map()[%5]
        %9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32>
        %10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
        %11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32>
        flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1}
        return
      }
    }
  }
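  // NOTE: dispatch_2 is the data-tiled matmul kernel. On these shapes,
  // linalg.batch_mmt4d computes, for each batch b and tile pair (m, n):
  //   acc[i, j] += lhs[b, m, k, i, 0] * rhs[b, n, k, j, 0]  for k in [0, 3200),
  // i.e. a 16x16 f32 accumulator tile fed by 16x1 LHS and (transposed) 16x1 RHS
  // tiles, with the f16 operand extended to f32 before the multiply-accumulate.
  // %8 = ceildiv(%5, 16) recomputes the M tile count from workload ordinal 0 so
  // the output tensor.empty matches the workload given at the call site.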
  flow.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 {
    flow.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>, %arg1: index, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>) {
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.workload.ordinal %arg3, 2 : index
        %3 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0}
        %4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2}
        %5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32>
        %6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32>
        %unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32>
        flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2}
        return
      }
    }
  }
  util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
    %2 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1}
    %3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<8640x3200xf16>
    %4 = affine.apply #map()[%1]
    %5 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %4](%2, %1, %0, %4) : (tensor<?x?x3200xf32>{%0, %1}, index, index, index) -> tensor<?x?x3200x16x1xf32>{%0, %4}
    %6 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%3, %0) : (tensor<8640x3200xf16>, index) -> tensor<?x540x3200x16x1xf16>{%0}
    %7 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %4](%1, %5, %6, %0, %4) : (index, tensor<?x?x3200x16x1xf32>{%0, %4}, tensor<?x540x3200x16x1xf16>{%0}, index, index) -> tensor<?x?x540x16x16xf32>{%0, %4}
    %8 = flow.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%4, %0, %1](%7, %4, %0, %1) : (tensor<?x?x540x16x16xf32>{%0, %4}, index, index, index) -> tensor<?x?x8640xf32>{%0, %1}
    %9 = hal.tensor.export %8 "output0" : tensor<?x?x8640xf32>{%0, %1} -> !hal.buffer_view
    util.return %9 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
// (module unchanged; identical to the IR dump after ApplyPatterns above)
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
// (module unchanged; identical to the IR dump after ApplyPatterns above)
// -----// IR Dump After IPO (iree-util-ipo) //----- //
// (module unchanged; identical to the IR dump after ApplyPatterns above)
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 16)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]>
module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
  stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 {
    stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) {
        %c0 = arith.constant 0 : index
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg2, 1 : index
        %2 = flow.dispatch.workload.ordinal %arg3, 2 : index
        %3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0}
        %4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        %5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32>
        %6 = affine.apply #map()[%0]
        %7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32>
        %pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32>
        flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2}
        return
      }
    }
  }
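  // NOTE: after conversion to the stream dialect the tensor ABI is gone from the
  // dispatch signatures: each !flow.dispatch.tensor argument is now an opaque
  // !stream.binding, and the typed view is re-established in the body with
  // stream.binding.subspan %argN[%c0], carrying the same dynamic-shape operands.
  // The flow.dispatch.workload.ordinal and flow.dispatch.tensor.load/store ops
  // still appear in their flow form at this point.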
  stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 {
    stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>>
        %1 = flow.dispatch.workload.ordinal %arg1, 0 : index
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1}
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16>
        %4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16>
        %5 = tensor.empty(%1) : tensor<?x8640x3200xf16>
        %6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) {
        ^bb0(%in: f16, %out: f16):
          linalg.yield %in : f16
        } -> tensor<?x8640x3200xf16>
        %pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16>
        flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1}
        return
      }
    }
  }
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
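  // Note: dispatch_2 runs linalg.batch_mmt4d on the packed operands: M and N are tiled by 16 and K (3200) by 1, with mixed f32*f16 inputs accumulating into 16x16 f32 result tiles. | |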
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
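  // Note: dispatch_3 unpacks the 16x16 result tiles back into the row-major tensor<?x?x8640xf32> the ABI expects. | |
  // The host function below asserts the input buffer_view shapes, imports them as stream resources, chains the four dispatches via stream.async.dispatch, and exports the result as a buffer_view. | |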
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200_1 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200_1]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%c0 = arith.constant 0 : index | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%c0_2 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0_2 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%c0_3 = arith.constant 0 : index | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0_3 to %9 for %9], %12[%c0_3 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%c0_4 = arith.constant 0 : index | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0_4 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
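// Note: this appears to be a verification-only pass that checks all tensor ops have been lowered into stream dispatches; the module below matches the previous dump. | |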
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c3200 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
%c8640 = arith.constant 8640 : index | |
%c3200_1 = arith.constant 3200 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200_1]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%c0 = arith.constant 0 : index | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%c0_2 = arith.constant 0 : index | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0_2 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%c0_3 = arith.constant 0 : index | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0_3 to %9 for %9], %12[%c0_3 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%c0_4 = arith.constant 0 : index | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0_4 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
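// Note: canonicalization hoists constants to the top of the host function and folds the duplicates (%c0_2..%c0_4 and %c3200_1 from the previous dump collapse into %c0 and %c3200). | |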
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
%dense_row_major_0 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major_0) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
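// Note: CSE removes the remaining duplicate: the second hal.encoding_type<dense_row_major> (%dense_row_major_0) is replaced by %dense_row_major in the input1 assert. | |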
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
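// Note: there are no util.global accesses in this function, so the pass leaves it unchanged from the CSE dump above. | |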
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
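// Note: the whole module is reprinted here with the attribute aliases (#map, #map1, #map2) at module scope; the dispatch executables and host function appear to carry over unchanged. | |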
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
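// Note: with no util.global ops in this module there is nothing to fold; the dump below repeats the same four executables and host function. | |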
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
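// Note: likewise, with no globals to fuse, this pass appears to be a no-op here; the visible portion of the dump below is identical to the previous one. | |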
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
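// dispatch_0 packs the dynamic ?x?x3200 f32 LHS for data tiling: dim 1 is
// grouped into ceildiv(d1, 16) outer tiles of 16x1 inner tiles, with 0.0 as
// the padding_value for a ragged last tile. A minimal static-shape sketch of
// the same op (illustrative only, not taken from this module):
//   %dst = tensor.empty() : tensor<2x3200x16x1xf32>
//   %p = tensor.pack %src padding_value(%cst : f32)
//        inner_dims_pos = [0, 1] inner_tiles = [16, 1] into %dst
//        : tensor<20x3200xf32> -> tensor<2x3200x16x1xf32>
// (20 rows -> ceildiv(20, 16) = 2 outer tiles, the second one zero-padded.)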
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
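// dispatch_1 broadcasts the static 8640x3200 f16 RHS across the dynamic batch
// dimension and packs it with 16x1 inner tiles; 8640 divides evenly by 16
// (8640 / 16 = 540), so no padding_value is needed and the packed result is
// ?x540x3200x16x1.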
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
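// dispatch_2 is the tiled matmul itself: linalg.batch_mmt4d contracts the
// M0=16/K0=1 LHS tiles against the N0=16/K0=1 RHS tiles (mixed precision,
// f32 x f16 accumulated into f32, per the _f32xf16xf32 suffix) after
// linalg.fill seeds the ?x?x540x16x16 accumulator with zeros.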
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
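// dispatch_3 reverses the data tiling: tensor.unpack folds the 16x16 tiles of
// ?x?x540x16x16 back into row-major ?x?x8640 (540 * 16 = 8640), trimming rows
// that were added as padding when the LHS was packed. Continuing the sketch
// from dispatch_0 above (again illustrative only):
//   %u = tensor.unpack %p inner_dims_pos = [0, 1] inner_tiles = [16, 1]
//        into %d : tensor<2x3200x16x1xf32> -> tensor<20x3200xf32>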
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
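// NOTE: with a single public entry point and no internal callees there is
// nothing for interprocedural optimization to propagate; the dump below is
// unchanged.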
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- // | |
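// NOTE: the module has no util.initializer ops to combine, so this dump is
// again unchanged.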
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = stream.tensor.sizeof tensor<?x?x3200xf32>{%0, %1} : index | |
%3 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%2} | |
%4 = stream.async.transfer %3 : !stream.resource<external>{%2} -> !stream.resource<*>{%2} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.sizeof tensor<8640x3200xf16> : index | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%5} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} -> !stream.resource<*>{%5} | |
%8 = affine.apply #map()[%1] | |
%9 = stream.tensor.sizeof tensor<?x?x3200x16x1xf32>{%0, %8} : index | |
%10 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%4[%c0 to %2 for %2], %1, %0, %8) : (!stream.resource<*>{%2}, index, index, index) -> !stream.resource<*>{%9} | |
%11 = stream.tensor.sizeof tensor<?x540x3200x16x1xf16>{%0} : index | |
%12 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%5}, index) -> !stream.resource<*>{%11} | |
%13 = stream.tensor.sizeof tensor<?x?x540x16x16xf32>{%0, %8} : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %10[%c0 to %9 for %9], %12[%c0 to %11 for %11], %0, %8) : (index, !stream.resource<*>{%9}, !stream.resource<*>{%11}, index, index) -> !stream.resource<*>{%13} | |
%15 = stream.tensor.sizeof tensor<?x?x8640xf32>{%0, %1} : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%14[%c0 to %13 for %13], %8, %0, %1) : (!stream.resource<*>{%13}, index, index, index) -> !stream.resource<*>{%15} | |
%17 = stream.async.transfer %16 : !stream.resource<*>{%15} -> !stream.resource<external>{%15} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%15} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
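// Here the symbolic stream.tensor.sizeof ops are lowered to explicit byte
// arithmetic, and sizeof(tensor<8640x3200xf16>) folds to the constant
// 55296000. The magic constants below are per-element/row byte counts:
//   12800    = 3200 * 4           (one f32 row of the ?x?x3200 input)
//   55296000 = 8640 * 3200 * 2    (the full f16 RHS; equally 540*3200*16*2)
//   204800   = 3200 * 16 * 1 * 4  (one outer tile row of the packed f32 LHS)
//   552960   = 540 * 16 * 16 * 4  (one 16-row M tile across all 540 N tiles
//                                  of the f32 accumulator)
//   34560    = 8640 * 4           (one f32 row of the ?x?x8640 result)
// so e.g. sizeof(tensor<?x?x3200xf32>{d0, d1}) becomes (d0 * 12800) * d1.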
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
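// NOTE: from here on each stream.executable is re-printed standalone, so the
// #map aliases from the module prelude appear inlined as affine_map<...>
// literals; in these dumps the dispatch bodies are otherwise unchanged.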
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
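// NOTE: canonicalization finds nothing to fold in the entry function at this
// point; the dump matches the post-encoding IR above.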
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
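// NOTE: likewise a no-op here; every constant and dim query is already a
// single SSA value, so CSE leaves the function untouched.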
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
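// NOTE: there are no global loads or stores in this function, so this pass is
// also a no-op.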
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
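// The batch_mmt4d above contracts 16x1 LHS tiles against 16x1 RHS tiles into
// 16x16 f32 accumulator tiles. A hedged NumPy reference of its semantics
// (shapes shrunk from 3200/540 so the sketch runs fast; the tile sizes
// m0 = n0 = 16, k0 = 1 are the real ones):

import numpy as np

B, M1, N1, K1, m0, n0, k0 = 2, 3, 5, 8, 16, 16, 1
lhs = np.random.rand(B, M1, K1, m0, k0).astype(np.float32)   # cf. ?x?x3200x16x1xf32
rhs = np.random.rand(B, N1, K1, n0, k0).astype(np.float16)   # cf. ?x540x3200x16x1xf16

# batch_mmt4d: out[b,m,n,i,j] = sum_{k,l} lhs[b,m,k,i,l] * rhs[b,n,k,j,l],
# with the f16 operand widened to f32 for the multiply-accumulate.
out = np.einsum("bmkil,bnkjl->bmnij", lhs, rhs.astype(np.float32))

# Cross-check against the batch_matmul_transpose_b this dispatch replaced,
# computed on the unpacked row-major layouts.
A = lhs[..., 0].transpose(0, 1, 3, 2).reshape(B, M1 * m0, K1)
Bt = rhs[..., 0].transpose(0, 1, 3, 2).reshape(B, N1 * n0, K1)
ref = A @ Bt.transpose(0, 2, 1).astype(np.float32)
assert np.allclose(out.transpose(0, 1, 3, 2, 4).reshape(B, M1 * m0, N1 * n0),
                   ref, atol=1e-4)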
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
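// The tensor.pack in dispatch_1 just retiles the broadcast RHS: with
// inner_dims_pos = [1, 2], inner_tiles = [16, 1], and 8640 = 540 * 16 exactly,
// it is a pure reshape + transpose (no padding, unlike dispatch_0, whose
// dynamic M dim needs a padding_value). A NumPy sketch with an assumed batch
// size D = 2:

import numpy as np

D = 2
x = np.random.rand(D, 8640, 3200).astype(np.float16)  # cf. tensor<?x8640x3200xf16>
packed = x.reshape(D, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)
assert packed.shape == (D, 540, 3200, 16, 1)          # cf. tensor<?x540x3200x16x1xf16>
# pack semantics: packed[d, o1, o2, i1, i2] == x[d, o1 * 16 + i1, o2 + i2]
assert packed[1, 7, 11, 3, 0] == x[1, 7 * 16 + 3, 11]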
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
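// dispatch_3's tensor.unpack is the inverse retiling: with inner_tiles =
// [16, 16] it moves the tile dims back inside, collapses them, and slices off
// whatever the dynamic M dim was padded by in dispatch_0. A NumPy sketch with
// assumed stand-in sizes (M = 20, so one row tile is partly padding):

import numpy as np

B, M = 2, 20
M1 = -(M // -16)                        # ceildiv(M, 16) = 2, as in #map
packed = np.random.rand(B, M1, 540, 16, 16).astype(np.float32)
full = packed.transpose(0, 1, 3, 2, 4).reshape(B, M1 * 16, 540 * 16)
unpacked = full[:, :M, :]               # drop the 12 padded rows
assert unpacked.shape == (B, 20, 8640)  # cf. tensor<?x?x8640xf32>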
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
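dispatch_0's tensor.pack rewrites the ?x?x3200xf32 input into the ?x?x3200x16x1xf32 layout the mmt4d kernel consumes: dim 1 is tiled by 16 (zero-padded via padding_value(%cst : f32) when it is not a multiple of 16) and dim 2 trivially by 1, which is where the affine_map<()[s0] -> (s0 ceildiv 16)> tile count comes from. A minimal NumPy sketch of that layout change, with illustrative shapes (not IREE API):

import numpy as np

def pack_16x1(src):
    # (B, M, 3200) -> (B, ceil(M/16), 3200, 16, 1); mirrors inner_dims_pos = [1, 2],
    # inner_tiles = [16, 1] with zero padding on the M tiles
    b, m, k = src.shape
    m_tiles = -(-m // 16)                    # s0 ceildiv 16
    padded = np.zeros((b, m_tiles * 16, k), dtype=src.dtype)
    padded[:, :m, :] = src
    return padded.reshape(b, m_tiles, 16, k, 1).transpose(0, 1, 3, 2, 4)

x = np.ones((2, 37, 3200), dtype=np.float32)
assert pack_16x1(x).shape == (2, 3, 3200, 16, 1)  # 37 rows pad out to 3 tiles of 16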
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply #map()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
} | |
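dispatch_1 does the same for the weight: the linalg.generic with indexing maps (d0, d1, d2) -> (d1, d2) and identity simply replicates the static 8640x3200xf16 tensor across the dynamic batch dim, and the following tensor.pack tiles it into ?x540x3200x16x1xf16 (8640 = 540 x 16, so no padding is needed). A NumPy sketch under the same assumptions as the one above:

import numpy as np

def broadcast_then_pack(w, batch):
    # (8640, 3200) -> (batch, 8640, 3200) -> (batch, 540, 3200, 16, 1)
    wb = np.ascontiguousarray(np.broadcast_to(w, (batch,) + w.shape))
    return wb.reshape(batch, 540, 16, 3200, 1).transpose(0, 1, 3, 2, 4)

w = np.ones((8640, 3200), dtype=np.float16)
assert broadcast_then_pack(w, 2).shape == (2, 540, 3200, 16, 1)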
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%6 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%7 = stream.async.transfer %6 : !stream.resource<external>{%c55296000} -> !stream.resource<*>{%c55296000} | |
%8 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%9 = arith.muli %0, %c204800 : index | |
%10 = arith.muli %9, %8 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %8](%5[%c0 to %3 for %3], %1, %0, %8) : (!stream.resource<*>{%3}, index, index, index) -> !stream.resource<*>{%10} | |
%12 = arith.muli %0, %c55296000 : index | |
%13 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%7[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<*>{%c55296000}, index) -> !stream.resource<*>{%12} | |
%14 = arith.muli %0, %c552960 : index | |
%15 = arith.muli %14, %8 : index | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %8](%1, %11[%c0 to %10 for %10], %13[%c0 to %12 for %12], %0, %8) : (index, !stream.resource<*>{%10}, !stream.resource<*>{%12}, index, index) -> !stream.resource<*>{%15} | |
%17 = arith.muli %0, %c34560 : index | |
%18 = arith.muli %17, %1 : index | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%8, %0, %1](%16[%c0 to %15 for %15], %8, %0, %1) : (!stream.resource<*>{%15}, index, index, index) -> !stream.resource<*>{%18} | |
%20 = stream.async.transfer %19 : !stream.resource<*>{%18} -> !stream.resource<external>{%18} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%18} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
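Two things are visible in this RefineUsagePass dump relative to the previous one: every placeholder !stream.resource<*> has been refined to a concrete lifetime (external for the imported arguments and exported result, transient for the intermediates), and the stream.async.transfer copies that bridged external and wildcard resources are gone. The dispatch_2 kernel itself, linalg.batch_mmt4d, contracts the two packed operands tile-by-tile: out[b, m1, n1, m0, n0] accumulates lhs[b, m1, k1, m0, k0] * rhs[b, n1, k1, n0, k0] over (k1, k0), promoting the f16 rhs to f32. A one-line NumPy reference (sketch with toy shapes; K1 is 3200 in the dump):

import numpy as np

def batch_mmt4d(lhs, rhs):
    # lhs: (B, M1, K1, 16, 1) f32, rhs: (B, N1, K1, 16, 1) f16 -> (B, M1, N1, 16, 16) f32
    return np.einsum('bmkij,bnkpj->bmnip', lhs, rhs.astype(np.float32))

lhs = np.ones((1, 3, 5, 16, 1), dtype=np.float32)
rhs = np.ones((1, 2, 5, 16, 1), dtype=np.float16)
assert batch_mmt4d(lhs, rhs).shape == (1, 3, 2, 16, 16)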
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
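dispatch_3's tensor.unpack is the inverse of the pack in dispatch_0: it interleaves the 540 x 16 and M-tile x 16 tiles of the ?x?x540x16x16xf32 accumulator back into a dense ?x?x8640xf32 and drops the rows that only existed as ceildiv-16 padding. Sketch:

import numpy as np

def unpack_16x16(src, m):
    # (B, M1, 540, 16, 16) -> (B, m, 8640); mirrors inner_dims_pos = [1, 2],
    # inner_tiles = [16, 16], slicing away the padded M rows
    b, m_tiles, n_tiles, _, _ = src.shape
    full = src.transpose(0, 1, 3, 2, 4).reshape(b, m_tiles * 16, n_tiles * 16)
    return full[:, :m, :]

y = np.ones((2, 3, 540, 16, 16), dtype=np.float32)
assert unpack_16x16(y, 37).shape == (2, 37, 8640)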
// -----// IR Dump After CSE (cse) //----- // | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
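// SimplifyGlobalAccesses hoists and sinks util.global loads/stores; this | |
// module declares no globals, so the function is reprinted unchanged. | |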
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
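// ApplyPatterns runs module-wide canonicalization patterns; from this point | |
// the dumps print the whole module, including the four stream.executables. | |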
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
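// dispatch_0 tiles the dynamic M dimension of the f32 LHS by 16 (inner | |
// tiles 16x1), padding any ragged tail with 0.0 so the mmt4d kernel in | |
// dispatch_2 only ever sees whole tiles. | |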
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
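// dispatch_1 broadcasts the shared 8640x3200 f16 RHS across the dynamic | |
// batch with a linalg.generic, then packs it to ?x540x3200x16x1 | |
// (540 = 8640 / 16); no padding value is needed since 8640 divides evenly. | |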
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
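// dispatch_2 is the packed matmul proper: linalg.batch_mmt4d multiplies the | |
// f32 LHS tiles by the f16 RHS tiles and accumulates into an f32 | |
// ?x?x540x16x16 tensor that linalg.fill first zeroes. | |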
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
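// dispatch_3 unpacks the 16x16 accumulator tiles back into the row-major | |
// ?x?x8640 f32 result that is exported as the function's buffer view. | |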
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
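// FoldGlobals folds chains of immutable util.globals; none exist here, so | |
// the module is reprinted unchanged. | |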
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
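// FuseGlobals combines correlated globals; again a no-op for this | |
// global-free module. | |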
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
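// IPO propagates constants and prunes unused arguments across calls; with a | |
// single public entry point and no internal callees it has nothing to do. | |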
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- // | |
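// As its name suggests, a verification-only pass: it checks that each | |
// stream.async.dispatch operand range [%c0 to %size for %size] stays inside | |
// the declared !stream.resource size, and rewrites nothing. | |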
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
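        // pack pads the dynamic M dimension up to a multiple of 16 with %cst (0.0) and retiles the
        // f32 LHS from ?x?x3200 to ?x(ceil(M/16))x3200x16x1: M-tile 16, K-tile 1.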
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
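        // 8640 divides evenly into 540 outer tiles of 16 (540 * 16 = 8640), so this pack needs no
        // padding_value; the broadcast along the batch dimension comes from the linalg.generic above.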
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
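        // batch_mmt4d multiplies 16x1 LHS tiles against 16x1 RHS tiles (RHS effectively transposed),
        // accumulating 16x16 f32 blocks over the shared K = 3200 dimension with mixed f32 x f16 -> f32 operands.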
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
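        // unpack reverses the 16x16 result tiling back to a row-major ?x?x8640 tensor, trimming the
        // rows that were added as M padding when the LHS was packed.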
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
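    // Buffer sizes in bytes: 12800 = one 3200-element f32 row (3200 * 4); 55296000 = the full f16
    // operand (8640 * 3200 * 2); 204800 = one 3200x16x1 f32 pack slab (3200 * 16 * 4); 552960 = one
    // 540x16x16 f32 result slab (540 * 256 * 4); 34560 = one 8640-element f32 output row (8640 * 4).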
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%10 = arith.muli %0, %c55296000 : index | |
%11 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%10} | |
%12 = arith.muli %0, %c552960 : index | |
%13 = arith.muli %12, %6 : index | |
%14 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%10}, index, index) -> !stream.resource<transient>{%13} | |
%15 = arith.muli %0, %c34560 : index | |
%16 = arith.muli %15, %1 : index | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%14[%c0 to %13 for %13], %6, %0, %1) : (!stream.resource<transient>{%13}, index, index, index) -> !stream.resource<external>{%16} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%16} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
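// Execution scheduling wraps all four dispatches in a single stream.async.execute region: the host
// imports both operands, launches the region, and awaits one timepoint before exporting the result.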
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg2[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg3[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16[%c0 to %8 for %8], %17[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%18[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %19 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
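// Concurrency scheduling notices that dispatch_0 (pack of the f32 LHS) and dispatch_1 (broadcast +
// pack of the f16 RHS) share no data, so it nests them in a stream.async.concurrent region;
// dispatch_2 and dispatch_3 stay sequential since each consumes the previous result.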
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
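// Timepoint propagation makes dependencies explicit: each imported resource contributes a timepoint
// and the execute region awaits their join. Both imports are already resolved here, so the
// timepoints are immediate.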
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%14 = stream.timepoint.immediate => !stream.timepoint | |
%15 = stream.timepoint.immediate => !stream.timepoint | |
%16 = stream.timepoint.join max(%14, %15) => !stream.timepoint | |
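    // Joining two immediate timepoints is a no-op; the canonicalizer below folds this join and the
    // await(%16) clause away.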
%results, %result_timepoint = stream.async.execute await(%16) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%19:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%22 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%23 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %22, %23 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %19#0[%c0 to %8 for %8], %19#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%21 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%20[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %21 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%17 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%18 = stream.tensor.export %17 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %18 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
// (module is identical to the PropagateTimepointsPass dump above: this pipeline uses no stream
// builtins, so the pass had nothing to materialize)
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
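// Canonicalization folds the immediate timepoints and their join, so stream.async.execute loses its
// await clause; the function is otherwise unchanged.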
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- //
// (function is identical to the Canonicalizer dump above: CSE found no duplicated subexpressions to merge)
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// (function is identical to the Canonicalizer dump above: it loads and stores no util.global values,
// so there was nothing to simplify)
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- // | |
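// (ApplyPatterns runs the util dialect's cleanup and folding patterns; from here on
//  the dumps print the whole module, including the four stream.executables.)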
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
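  // Together the four dispatches implement the data-tiled f32xf16 batch matmul:
  //   dispatch_0: tensor.pack pads and tiles the dynamic f32 LHS to ?x?x3200x16x1.
  //   dispatch_1: broadcasts the 8640x3200 f16 RHS across the batch and packs it to
  //               ?x540x3200x16x1 (540 = 8640 / 16).
  //   dispatch_2: linalg.batch_mmt4d multiplies the packed tiles, accumulating in f32.
  //   dispatch_3: tensor.unpack restores the row-major ?x?x8640xf32 result.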
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
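        // For intuition: with inner_dims_pos = [1, 2] and inner_tiles = [16, 1], source
        // element (b, i, j) lands at (b, i floordiv 16, j, i mod 16, 0) in the packed
        // tensor, and the trailing partial tile along i is filled with the 0.0 padding value.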
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
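        // batch_mmt4d contracts the packed tiles: C[b, m1, n1, m0, n0] +=
        //   sum over k1, k0 of A[b, m1, k1, m0, k0] * B[b, n1, k1, n0, k0],
        // with 16x16 result tiles and a unit k0; the f16 operand is extended to f32
        // so accumulation happens in f32.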
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
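        // tensor.unpack inverts the 16x16 tiling: (b, i floordiv 16, n floordiv 16,
        // i mod 16, n mod 16) maps back to (b, i, n), dropping rows that only existed
        // as pack padding.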
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- // | |
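// (FoldGlobals inlines and deduplicates util.global ops; there are none in this
//  module, so the dump below is identical to the previous one.)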
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- // | |
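// (FuseGlobals merges globals that are always assigned the same value; again a
//  no-op here, and the module is reprinted unchanged.)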
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After IPO (iree-util-ipo) //----- // | |
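// (IPO propagates constants and elides unused arguments across util.func calls;
//  with a single public entry point and no internal calls it leaves the IR as-is.)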
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- // | |
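// (VerifyLoweringToAsyncPass only checks that all stream ops are now in async form;
//  it performs no rewrites, so the module below is reprinted verbatim.)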
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
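  // Note on dispatch_0: tensor.pack tiles the second dimension of the
  // ?x?x3200xf32 LHS by 16 (inner_tiles = [16, 1]), giving a packed shape of
  // ? x ceil(d1/16) x 3200 x 16 x 1 with 0.0 padding for any ragged last tile.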
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
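  // Note on dispatch_1: the input map of the linalg.generic drops d0, so the
  // static 8640x3200xf16 RHS is broadcast along the batch dimension before
  // being packed with inner_tiles = [16, 1]; 8640 / 16 = 540 yields the
  // statically known ?x540x3200x16x1xf16 shape.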
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
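  // Note on dispatch_2: linalg.batch_mmt4d contracts 16x1 LHS tiles against
  // 16x1 RHS tiles into 16x16 f32 accumulator tiles (M0 = N0 = 16, K0 = 1);
  // the reduction of size 3200 runs over the outer K1 dimension.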
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
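  // Note on dispatch_3: tensor.unpack inverts the result tiling, folding the
  // 540x16 tile grid back into the logical 8640 output dimension.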
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
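    // Byte-size arithmetic for the resource sizes computed above, with d0 = %0
    // and d1 = %1 the dynamic dims of input0 (f32 = 4 B, f16 = 2 B):
    //   %3  = d0 * d1 * 3200 * 4          (LHS import;    3200 * 4 = 12800)
    //   %8  = d0 * ceil(d1/16) * 204800   (packed LHS;    3200 * 16 * 4 = 204800)
    //   %9  = d0 * 55296000               (packed RHS;    540 * 3200 * 16 * 2)
    //   %11 = d0 * ceil(d1/16) * 552960   (mmt4d result;  540 * 16 * 16 * 4)
    //   %13 = d0 * d1 * 34560             (output;        8640 * 4 = 34560)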
%results, %result_timepoint = stream.async.execute with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}) -> !stream.resource<external>{%13} { | |
%16:2 = stream.async.concurrent with(%arg2 as %arg4: !stream.resource<external>{%3}, %arg3 as %arg5: !stream.resource<external>{%c55296000}) -> (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}) { | |
%19 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%arg4[%c0 to %3 for %3], %1, %0, %6) : (!stream.resource<external>{%3}, index, index, index) -> !stream.resource<transient>{%8} | |
%20 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%arg5[%c0 to %c55296000 for %c55296000], %0) : (!stream.resource<external>{%c55296000}, index) -> !stream.resource<transient>{%9} | |
stream.yield %19, %20 : !stream.resource<transient>{%8}, !stream.resource<transient>{%9} | |
} | |
%17 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %16#0[%c0 to %8 for %8], %16#1[%c0 to %9 for %9], %0, %6) : (index, !stream.resource<transient>{%8}, !stream.resource<transient>{%9}, index, index) -> !stream.resource<transient>{%11} | |
%18 = stream.async.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%17[%c0 to %11 for %11], %6, %0, %1) : (!stream.resource<transient>{%11}, index, index, index) -> !stream.resource<external>{%13} | |
stream.yield %18 : !stream.resource<external>{%13} | |
} => !stream.timepoint | |
%14 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%13} | |
%15 = stream.tensor.export %14 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %15 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- // | |
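// Note: ScheduleAllocation replaces the value-based stream.async.* ops with
// explicit resource management: stream.resource.alloca / dealloca for the
// result and transient buffers, stream.resource.pack to lay the transients
// out in a single slab, and stream.cmd.execute / stream.cmd.dispatch with
// explicit ro/wo byte ranges in place of SSA tensor operands.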
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%14:4 = stream.resource.pack slices({ | |
[0, 1] = %8, | |
[0, 1] = %9, | |
[1, 2] = %11 | |
}) : index | |
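    // Note: each stream.resource.pack slice reads [first, last] = size, where
    // the interval appears to span the execution steps over which the slice is
    // live: both pack outputs ([0, 1]) survive from the concurrent packs into
    // the mmt4d, and the mmt4d result ([1, 2]) survives into the unpack. The
    // op yields the total slab size (%14#0) and a byte offset per slice
    // (%14#1..%14#3).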
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized : !stream.resource<transient>{%14#0} => !stream.timepoint | |
%15 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint | |
%16 = stream.cmd.execute await(%15) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_1 as %arg5: !stream.resource<transient>{%14#0}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%14#1 for %8] : !stream.resource<transient>{%14#0} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%14#2 for %9] : !stream.resource<transient>{%14#0} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%14#1 for %8] : !stream.resource<transient>{%14#0}, | |
ro %arg5[%14#2 for %9] : !stream.resource<transient>{%14#0}, | |
wo %arg5[%14#3 for %11] : !stream.resource<transient>{%14#0} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%14#3 for %11] : !stream.resource<transient>{%14#0}, | |
wo %arg4[%c0_0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%17 = stream.resource.dealloca await(%16) => %result_1 : !stream.resource<transient>{%14#0} => !stream.timepoint | |
%18 = stream.timepoint.join max(%17, %16) => !stream.timepoint | |
%19 = stream.timepoint.await %18 => %result : !stream.resource<external>{%13} | |
%20 = stream.tensor.export %19 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- // | |
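// Note: this program has no constant resources to pool, so
// iree-stream-pack-constants leaves the function below unchanged.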
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%14:4 = stream.resource.pack slices({ | |
[0, 1] = %8, | |
[0, 1] = %9, | |
[1, 2] = %11 | |
}) : index | |
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized : !stream.resource<transient>{%14#0} => !stream.timepoint | |
%15 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint | |
%16 = stream.cmd.execute await(%15) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_1 as %arg5: !stream.resource<transient>{%14#0}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%14#1 for %8] : !stream.resource<transient>{%14#0} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%14#2 for %9] : !stream.resource<transient>{%14#0} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%14#1 for %8] : !stream.resource<transient>{%14#0}, | |
ro %arg5[%14#2 for %9] : !stream.resource<transient>{%14#0}, | |
wo %arg5[%14#3 for %11] : !stream.resource<transient>{%14#0} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%14#3 for %11] : !stream.resource<transient>{%14#0}, | |
wo %arg4[%c0_0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%17 = stream.resource.dealloca await(%16) => %result_1 : !stream.resource<transient>{%14#0} => !stream.timepoint | |
%18 = stream.timepoint.join max(%17, %16) => !stream.timepoint | |
%19 = stream.timepoint.await %18 => %result : !stream.resource<external>{%13} | |
%20 = stream.tensor.export %19 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %20 : !hal.buffer_view | |
} | |
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- // | |
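// Note: LayoutSlices lowers the symbolic stream.resource.pack into concrete
// offset arithmetic: a running sum assigns the three slices offsets 0, %8,
// and %8 + %9, with a total transient size of %8 + %9 + %11. The %c64
// alignment constants emitted alongside are dead here and fold away in the
// canonicalization that follows.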
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%c0_1 = arith.constant 0 : index | |
%c64 = arith.constant 64 : index | |
%14 = arith.addi %8, %c0_1 : index | |
%c64_2 = arith.constant 64 : index | |
%c64_3 = arith.constant 64 : index | |
%15 = arith.addi %14, %9 : index | |
%c64_4 = arith.constant 64 : index | |
%c64_5 = arith.constant 64 : index | |
%16 = arith.addi %15, %11 : index | |
%c64_6 = arith.constant 64 : index | |
%c64_7 = arith.constant 64 : index | |
%result_8, %result_timepoint_9 = stream.resource.alloca uninitialized : !stream.resource<transient>{%16} => !stream.timepoint | |
%17 = stream.timepoint.join max(%result_timepoint, %result_timepoint_9) => !stream.timepoint | |
%18 = stream.cmd.execute await(%17) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_8 as %arg5: !stream.resource<transient>{%16}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0_1 for %8] : !stream.resource<transient>{%16} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%14 for %9] : !stream.resource<transient>{%16} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%c0_1 for %8] : !stream.resource<transient>{%16}, | |
ro %arg5[%14 for %9] : !stream.resource<transient>{%16}, | |
wo %arg5[%15 for %11] : !stream.resource<transient>{%16} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%15 for %11] : !stream.resource<transient>{%16}, | |
wo %arg4[%c0_0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%19 = stream.resource.dealloca await(%18) => %result_8 : !stream.resource<transient>{%16} => !stream.timepoint | |
%20 = stream.timepoint.join max(%19, %18) => !stream.timepoint | |
%21 = stream.timepoint.await %20 => %result : !stream.resource<external>{%13} | |
%22 = stream.tensor.export %21 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %22 : !hal.buffer_view | |
} | |
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- // | |
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}> | |
#map = affine_map<()[s0] -> (s0 ceildiv 16)> | |
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)> | |
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> | |
#device_target_llvm_cpu = #hal.device.target<"llvm-cpu", [#executable_target_embedded_elf_x86_64_]> | |
module attributes {hal.device.targets = [#device_target_llvm_cpu]} { | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [%1, %0, 3200], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200xf32>>{%1, %0} -> tensor<?x?x3200xf32> | |
%6 = affine.apply #map()[%0] | |
%7 = tensor.empty(%1, %6) : tensor<?x?x3200x16x1xf32> | |
%pack = tensor.pack %5 padding_value(%cst : f32) outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %7 : tensor<?x?x3200xf32> -> tensor<?x?x3200x16x1xf32> | |
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0, 0], sizes = [%1, %2, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x?x3200x16x1xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x3200x16x1xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack workgroups(%arg0: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack(%arg0: !stream.binding, %arg1: index, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> | |
%1 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8640, 3200], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8640x3200xf16>> -> tensor<8640x3200xf16> | |
%4 = tensor.empty(%1) : tensor<?x540x3200x16x1xf16> | |
%5 = tensor.empty(%1) : tensor<?x8640x3200xf16> | |
%6 = linalg.generic {indexing_maps = [#map1, #map2], iterator_types = ["parallel", "parallel", "parallel"]} ins(%3 : tensor<8640x3200xf16>) outs(%5 : tensor<?x8640x3200xf16>) { | |
^bb0(%in: f16, %out: f16): | |
linalg.yield %in : f16 | |
} -> tensor<?x8640x3200xf16> | |
%pack = tensor.pack %6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 1] into %4 : tensor<?x8640x3200xf16> -> tensor<?x540x3200x16x1xf16> | |
flow.dispatch.tensor.store %pack, %2, offsets = [0, 0, 0, 0, 0], sizes = [%1, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : tensor<?x540x3200x16x1xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x540x3200x16x1xf16>>{%1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32(%arg0: index, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.workload.ordinal %arg3, 1 : index | |
%1 = flow.dispatch.workload.ordinal %arg4, 2 : index | |
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} | |
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} | |
%4 = stream.binding.subspan %arg5[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
%5 = flow.dispatch.workload.ordinal %arg0, 0 : index | |
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x3200x16x1xf32>>{%0, %1} -> tensor<?x?x3200x16x1xf32> | |
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%0, 540, 3200, 16, 1], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x540x3200x16x1xf16>>{%0} -> tensor<?x540x3200x16x1xf16> | |
%8 = affine.apply #map()[%5] | |
%9 = tensor.empty(%0, %8) : tensor<?x?x540x16x16xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
%11 = linalg.batch_mmt4d ins(%6, %7 : tensor<?x?x3200x16x1xf32>, tensor<?x540x3200x16x1xf16>) outs(%10 : tensor<?x?x540x16x16xf32>) -> tensor<?x?x540x16x16xf32> | |
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [%0, %1, 540, 16, 16], strides = [1, 1, 1, 1, 1] : tensor<?x?x540x16x16xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x540x16x16xf32>>{%0, %1} | |
return | |
} | |
} | |
} | |
stream.executable private @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3 { | |
stream.executable.export public @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32 workgroups(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1, %arg2 | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32(%arg0: !stream.binding, %arg1: index, %arg2: index, %arg3: index, %arg4: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = flow.dispatch.workload.ordinal %arg1, 0 : index | |
%1 = flow.dispatch.workload.ordinal %arg2, 1 : index | |
%2 = flow.dispatch.workload.ordinal %arg3, 2 : index | |
%3 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} | |
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
%5 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0, 0, 0], sizes = [%1, %0, 540, 16, 16], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x540x16x16xf32>>{%1, %0} -> tensor<?x?x540x16x16xf32> | |
%6 = tensor.empty(%1, %2) : tensor<?x?x8640xf32> | |
%unpack = tensor.unpack %5 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [16, 16] into %6 : tensor<?x?x540x16x16xf32> -> tensor<?x?x8640xf32> | |
flow.dispatch.tensor.store %unpack, %4, offsets = [0, 0, 0], sizes = [%1, %2, 8640], strides = [1, 1, 1] : tensor<?x?x8640xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x8640xf32>>{%1, %2} | |
return | |
} | |
} | |
} | |
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply #map()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%c0_0 = arith.constant 0 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%c0_1 = arith.constant 0 : index | |
%c64 = arith.constant 64 : index | |
%14 = arith.addi %8, %c0_1 : index | |
%c64_2 = arith.constant 64 : index | |
%c64_3 = arith.constant 64 : index | |
%15 = arith.addi %14, %9 : index | |
%c64_4 = arith.constant 64 : index | |
%c64_5 = arith.constant 64 : index | |
%16 = arith.addi %15, %11 : index | |
%c64_6 = arith.constant 64 : index | |
%c64_7 = arith.constant 64 : index | |
%result_8, %result_timepoint_9 = stream.resource.alloca uninitialized : !stream.resource<transient>{%16} => !stream.timepoint | |
%17 = stream.timepoint.join max(%result_timepoint, %result_timepoint_9) => !stream.timepoint | |
%18 = stream.cmd.execute await(%17) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_8 as %arg5: !stream.resource<transient>{%16}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0_1 for %8] : !stream.resource<transient>{%16} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%14 for %9] : !stream.resource<transient>{%16} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%c0_1 for %8] : !stream.resource<transient>{%16}, | |
ro %arg5[%14 for %9] : !stream.resource<transient>{%16}, | |
wo %arg5[%15 for %11] : !stream.resource<transient>{%16} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%15 for %11] : !stream.resource<transient>{%16}, | |
wo %arg4[%c0_0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%19 = stream.resource.dealloca await(%18) => %result_8 : !stream.resource<transient>{%16} => !stream.timepoint | |
%20 = stream.timepoint.join max(%19, %18) => !stream.timepoint | |
%21 = stream.timepoint.await %20 => %result : !stream.resource<external>{%13} | |
%22 = stream.tensor.export %21 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %22 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump After Canonicalizer (canonicalize) //----- // | |
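// Note: canonicalization drops the dead alignment constants and the redundant
// add of zero left behind by LayoutSlices; the slice offsets are now %c0, %8,
// and %14 = %8 + %9.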
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%14 = arith.addi %8, %9 : index | |
%15 = arith.addi %14, %11 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized : !stream.resource<transient>{%15} => !stream.timepoint | |
%16 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%17 = stream.cmd.execute await(%16) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_0 as %arg5: !stream.resource<transient>{%15}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %8] : !stream.resource<transient>{%15} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%8 for %9] : !stream.resource<transient>{%15} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%c0 for %8] : !stream.resource<transient>{%15}, | |
ro %arg5[%8 for %9] : !stream.resource<transient>{%15}, | |
wo %arg5[%14 for %11] : !stream.resource<transient>{%15} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%14 for %11] : !stream.resource<transient>{%15}, | |
wo %arg4[%c0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%18 = stream.resource.dealloca await(%17) => %result_0 : !stream.resource<transient>{%15} => !stream.timepoint | |
%19 = stream.timepoint.join max(%18, %17) => !stream.timepoint | |
%20 = stream.timepoint.await %19 => %result : !stream.resource<external>{%13} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
// -----// IR Dump After CSE (cse) //----- // | |
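// Note: CSE finds no further redundancy; this dump is identical to the
// canonicalized form above.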
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} { | |
%c34560 = arith.constant 34560 : index | |
%c552960 = arith.constant 552960 : index | |
%c204800 = arith.constant 204800 : index | |
%c55296000 = arith.constant 55296000 : index | |
%c12800 = arith.constant 12800 : index | |
%c0 = arith.constant 0 : index | |
%c8640 = arith.constant 8640 : index | |
%c3200 = arith.constant 3200 : index | |
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index | |
%1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index | |
%element_type_f32 = hal.element_type<f32> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major) | |
%2 = arith.muli %0, %c12800 : index | |
%3 = arith.muli %2, %1 : index | |
%4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3} | |
%element_type_f16 = hal.element_type<f16> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major) | |
%5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000} | |
%6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1] | |
%7 = arith.muli %0, %c204800 : index | |
%8 = arith.muli %7, %6 : index | |
%9 = arith.muli %0, %c55296000 : index | |
%10 = arith.muli %0, %c552960 : index | |
%11 = arith.muli %10, %6 : index | |
%12 = arith.muli %0, %c34560 : index | |
%13 = arith.muli %12, %1 : index | |
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint | |
%14 = arith.addi %8, %9 : index | |
%15 = arith.addi %14, %11 : index | |
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized : !stream.resource<transient>{%15} => !stream.timepoint | |
%16 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint | |
%17 = stream.cmd.execute await(%16) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_0 as %arg5: !stream.resource<transient>{%15}) { | |
stream.cmd.concurrent { | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg2[%c0 for %3] : !stream.resource<external>{%3}, | |
wo %arg5[%c0 for %8] : !stream.resource<transient>{%15} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) { | |
ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000}, | |
wo %arg5[%8 for %9] : !stream.resource<transient>{%15} | |
} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) { | |
ro %arg5[%c0 for %8] : !stream.resource<transient>{%15}, | |
ro %arg5[%8 for %9] : !stream.resource<transient>{%15}, | |
wo %arg5[%14 for %11] : !stream.resource<transient>{%15} | |
} | |
stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) { | |
ro %arg5[%14 for %11] : !stream.resource<transient>{%15}, | |
wo %arg4[%c0 for %13] : !stream.resource<external>{%13} | |
} | |
} => !stream.timepoint | |
%18 = stream.resource.dealloca await(%17) => %result_0 : !stream.resource<transient>{%15} => !stream.timepoint | |
%19 = stream.timepoint.join max(%18, %17) => !stream.timepoint | |
%20 = stream.timepoint.await %19 => %result : !stream.resource<external>{%13} | |
%21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view | |
util.return %21 : !hal.buffer_view | |
} | |
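// -----// Annotation: buffer-size arithmetic in the dump above (editor note, not compiler output) //----- //
// The index constants are byte sizes. With 4-byte f32 and 2-byte f16 elements,
// %c12800 = 3200*4 is one f32 input row, %c55296000 = 8640*3200*2 is the whole
// f16 weight, %c34560 = 8640*4 is one f32 output row, and %c204800 / %c552960
// are per-tile-row sizes of the packed operands for the 16x16x1 mmt4d tiling
// (%6 = ceildiv(d1, 16) counts tiles along the dynamic M dimension). Below is a
// minimal Python sketch re-deriving these sizes; the function name and the
// element-size assumptions are illustrative, not part of the dump.

import math

def mmtfp_buffer_sizes(d0: int, d1: int) -> dict:
    """Re-derive the sizes computed as %3/%8/%9/%11/%13/%15 above."""
    d1_tiles = math.ceil(d1 / 16)              # %6 = affine ceildiv 16
    sizes = {
        "lhs":      d0 * 12800 * d1,           # %3: ?x?x3200xf32 input
        "rhs":      55_296_000,                # %c55296000: 8640x3200xf16 weight
        "out":      d0 * 34560 * d1,           # %13: ?x?x8640xf32 result
        "lhs_pack": d0 * 204800 * d1_tiles,    # %8: input packed into 16x1 f32 tiles
        "rhs_pack": d0 * 55_296_000,           # %9: weight broadcast + packed per batch
        "acc_pack": d0 * 552960 * d1_tiles,    # %11: 540 x 16x16 f32 accumulator tiles
    }
    # %15, the single transient alloca, holds the three packed buffers back to back.
    sizes["transient"] = sizes["lhs_pack"] + sizes["rhs_pack"] + sizes["acc_pack"]
    return sizes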
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @turbine_llm_mmtfp_3d_8640_3200_f32f16(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @turbine_llm_mmtfp_3d_8640_3200_f32f16(%input0: tensor<?x?x3200xf32>, %input1: tensor<8640x3200xf16>) -> (%output0: tensor<?x?x8640xf32>)"}} {
  %c34560 = arith.constant 34560 : index
  %c552960 = arith.constant 552960 : index
  %c204800 = arith.constant 204800 : index
  %c55296000 = arith.constant 55296000 : index
  %c12800 = arith.constant 12800 : index
  %c0 = arith.constant 0 : index
  %c8640 = arith.constant 8640 : index
  %c3200 = arith.constant 3200 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %1 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[1] : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0, %1, %c3200]) type(%element_type_f32) encoding(%dense_row_major)
  %2 = arith.muli %0, %c12800 : index
  %3 = arith.muli %2, %1 : index
  %4 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<?x?x3200xf32>{%0, %1} in !stream.resource<external>{%3}
  %element_type_f16 = hal.element_type<f16> : i32
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c8640, %c3200]) type(%element_type_f16) encoding(%dense_row_major)
  %5 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<8640x3200xf16> in !stream.resource<external>{%c55296000}
  %6 = affine.apply affine_map<()[s0] -> (s0 ceildiv 16)>()[%1]
  %7 = arith.muli %0, %c204800 : index
  %8 = arith.muli %7, %6 : index
  %9 = arith.muli %0, %c55296000 : index
  %10 = arith.muli %0, %c552960 : index
  %11 = arith.muli %10, %6 : index
  %12 = arith.muli %0, %c34560 : index
  %13 = arith.muli %12, %1 : index
  %result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%13} => !stream.timepoint
  %14 = arith.addi %8, %9 : index
  %15 = arith.addi %14, %11 : index
  %result_0, %result_timepoint_1 = stream.resource.alloca uninitialized : !stream.resource<transient>{%15} => !stream.timepoint
  %16 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
  %17 = stream.cmd.execute await(%16) => with(%4 as %arg2: !stream.resource<external>{%3}, %5 as %arg3: !stream.resource<external>{%c55296000}, %result as %arg4: !stream.resource<external>{%13}, %result_0 as %arg5: !stream.resource<transient>{%15}) {
    stream.cmd.concurrent {
      stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_0_pack_f32[%1, %0, %6](%1, %0, %6 : index, index, index) {
        ro %arg2[%c0 for %3] : !stream.resource<external>{%3},
        wo %arg5[%c0 for %8] : !stream.resource<transient>{%15}
      }
      stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_1_generic_Dx8640x3200_f16_pack[%0](%0 : index) {
        ro %arg3[%c0 for %c55296000] : !stream.resource<external>{%c55296000},
        wo %arg5[%8 for %9] : !stream.resource<transient>{%15}
      }
    }
    stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_2_batch_mmt4d_DxDx540x3200x16x16x1_f32xf16xf32[%1, %0, %6](%1, %0, %6 : index, index, index) {
      ro %arg5[%c0 for %8] : !stream.resource<transient>{%15},
      ro %arg5[%8 for %9] : !stream.resource<transient>{%15},
      wo %arg5[%14 for %11] : !stream.resource<transient>{%15}
    }
    stream.cmd.dispatch @turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3::@turbine_llm_mmtfp_3d_8640_3200_f32f16_dispatch_3_unpack_f32[%6, %0, %1](%6, %0, %1 : index, index, index) {
      ro %arg5[%14 for %11] : !stream.resource<transient>{%15},
      wo %arg4[%c0 for %13] : !stream.resource<external>{%13}
    }
  } => !stream.timepoint
  %18 = stream.resource.dealloca await(%17) => %result_0 : !stream.resource<transient>{%15} => !stream.timepoint
  %19 = stream.timepoint.join max(%18, %17) => !stream.timepoint
  %20 = stream.timepoint.await %19 => %result : !stream.resource<external>{%13}
  %21 = stream.tensor.export %20 : tensor<?x?x8640xf32>{%0, %1} in !stream.resource<external>{%13} -> !hal.buffer_view
  util.return %21 : !hal.buffer_view
}
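// -----// Annotation: transient layout and dispatch ordering (editor note, not compiler output) //----- //
// This dump is identical to the CSE dump above: the function touches no
// globals, so iree-util-simplify-global-accesses had nothing to change. The
// single transient resource is carved into three back-to-back regions:
// [0, %8) for the packed LHS, [%8, %8 + %9) for the packed RHS, and
// [%14, %14 + %11) with %14 = %8 + %9 for the mmt4d accumulator. The two pack
// dispatches write disjoint regions and so run under stream.cmd.concurrent;
// dispatch_2 reads both and writes the accumulator, and dispatch_3 unpacks it
// into the external result. The timepoint chain (join of the two allocas,
// execute, dealloca, join, await) serializes all of this against the final
// stream.tensor.export. A small illustrative model of the layout and the
// implied ordering follows; it is not an IREE API, just the dependences.

def transient_plan(lhs_pack: int, rhs_pack: int, acc_pack: int):
    """Model the ro/wo ranges of the four dispatches above."""
    # (offset, size) in bytes within the one transient alloca (%15 total).
    regions = {
        "lhs_pack": (0, lhs_pack),                    # wo by dispatch_0, ro by dispatch_2
        "rhs_pack": (lhs_pack, rhs_pack),             # wo by dispatch_1, ro by dispatch_2
        "acc":      (lhs_pack + rhs_pack, acc_pack),  # wo by dispatch_2, ro by dispatch_3
    }
    # Dependences implied by those ranges; dispatch_0 and dispatch_1 commute.
    order = [{"dispatch_0", "dispatch_1"}, {"dispatch_2"}, {"dispatch_3"}]
    return regions, order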
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,-avx512pf,+sse4.2,-tsxldtrk,-ptwrite,-widekl,-sm3,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-avx512er,-ccmp,-amx-int8,-kl,-avx10.1-256,-sha512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,+gfni,-avxvnniint16,-amx-fp16,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-prefetchwt1,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 16)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#d