@pashu123
Created January 17, 2025 15:21
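// IREE HAL executable for @prefill_bs1$async_dispatch_39: a matmul-like contraction of
// shape Dx2048x176x32 (f16 x f16, accumulated in f32) with i8 weight dequantization,
// compiled for llvm-cpu (znver4, embedded ELF x86_64).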
hal.executable public @prefill_bs1$async_dispatch_39 {
  hal.executable.variant public @embedded_elf_x86_64 target(<"llvm-cpu", "embedded-elf-x86_64", {cpu = "znver4", cpu_features = "+prfchw,-cldemote,+avx,+aes,+sahf,+pclmul,-xop,+crc32,-amx-fp8,+xsaves,-avx512fp16,-usermsr,-sm4,-egpr,+sse4.1,+avx512ifma,+xsave,+sse4.2,-tsxldtrk,-sm3,-ptwrite,-widekl,-movrs,+invpcid,+64bit,+xsavec,-avx10.1-512,+avx512vpopcntdq,+cmov,-avx512vp2intersect,+avx512cd,+movbe,-avxvnniint8,-ccmp,-amx-int8,-kl,-avx10.1-256,+evex512,-avxvnni,-rtm,+adx,+avx2,-hreset,-movdiri,-serialize,-sha512,+vpclmulqdq,+avx512vl,-uintr,-cf,+clflushopt,-raoint,-cmpccxadd,+bmi,-amx-tile,+sse,-avx10.2-256,+gfni,-avxvnniint16,-amx-fp16,-zu,-ndd,+xsaveopt,+rdrnd,+avx512f,-amx-bf16,+avx512bf16,+avx512vnni,-push2pop2,+cx8,+avx512bw,+sse3,+pku,-nf,-amx-tf32,-amx-avx512,+fsgsbase,+clzero,+mwaitx,-lwp,+lzcnt,+sha,-movdir64b,-ppx,+wbnoinvd,-enqcmd,-amx-transpose,-avx10.2-512,-avxneconvert,-tbm,-pconfig,-amx-complex,+ssse3,+cx16,+bmi2,+fma,+popcnt,-avxifma,+f16c,+avx512bitalg,+rdpru,+clwb,+mmx,+sse2,+rdseed,+avx512vbmi2,-prefetchi,-amx-movrs,+rdpid,-fma4,+avx512vbmi,+shstk,+vaes,-waitpkg,-sgx,+fxsr,+avx512dq,+sse4a", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 64 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>) {
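    // The export's workgroup count is derived from the dynamic workload size %arg1 (the D dimension).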
    hal.executable.export public @prefill_bs1$async_dispatch_39_matmul_like_Dx2048x176x32_f16xf16xf32 ordinal(0) layout(#hal.pipeline.layout<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) {
    ^bb0(%arg0: !hal.device, %arg1: index):
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg1
      hal.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @prefill_bs1$async_dispatch_39_matmul_like_Dx2048x176x32_f16xf16xf32() {
        %c12976128 = arith.constant 12976128 : index
        %c12255232 = arith.constant 12255232 : index
        %cst = arith.constant 0.000000e+00 : f32
        %c200704000 = arith.constant 200704000 : index
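        // Three push constants: byte offsets for the two dynamically offset bindings (ordinals 0 and 1)
        // and the dynamic row count D (ordinal 2).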
        %0 = hal.interface.constant.load layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) ordinal(0) : i32
        %1 = hal.interface.constant.load layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) ordinal(1) : i32
        %2 = hal.interface.constant.load layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) ordinal(2) : i32
        %3 = arith.index_castui %0 : i32 to index
        %4 = arith.index_castui %1 : i32 to index
        %5 = arith.index_castui %2 : i32 to index
        %6:3 = util.assume.int
            %3<umin = 202473856, umax = 568173952>,
            %4<umin = 200835072, umax = 208961536>,
            %5<umin = 32, umax = 2016, udiv = 32>
          : index, index, index
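        // Buffer bindings: constant-offset readonly subspans for the 2048x176 f16 scale factors and the
        // 2048x176x32 i8 quantized weights, dynamically offset readonly subspans for the Dx176x32 f16
        // activations and a Dx2048 f16 addend, and the Dx2048 f16 output.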
        %7 = hal.interface.binding.subspan layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c12255232) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<2048x176xf16>>
        %8 = hal.interface.binding.subspan layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c12976128) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<2048x176x32xi8>>
        %9 = flow.dispatch.workload.ordinal %6#2, 0 : index
        %10 = hal.interface.binding.subspan layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%6#0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<?x176x32xf16>>{%9}
        %11 = hal.interface.binding.subspan layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%6#1) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<?x2048xf16>>{%9}
        %12 = hal.interface.binding.subspan layout(<constants = 3, bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c200704000) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<?x2048xf16>>{%9}
        %13 = flow.dispatch.tensor.load %7, offsets = [0, 0], sizes = [2048, 176], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<2048x176xf16>> -> tensor<2048x176xf16>
        %14 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0], sizes = [2048, 176, 32], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2048x176x32xi8>> -> tensor<2048x176x32xi8>
        %15 = flow.dispatch.tensor.load %10, offsets = [0, 0, 0], sizes = [%9, 176, 32], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x176x32xf16>>{%9} -> tensor<?x176x32xf16>
        %16 = flow.dispatch.tensor.load %11, offsets = [0, 0], sizes = [%9, 2048], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<?x2048xf16>>{%9} -> tensor<?x2048xf16>
        %17 = tensor.empty(%9) : tensor<?x2048xf16>
        %18 = tensor.empty(%9) : tensor<?x2048xf32>
        %19 = tensor.empty() : tensor<2048x176x32xf16>
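        // Dequantize the weights: sign-extend each i8 value, convert to f16, and multiply by the
        // per-(row, group) f16 scale broadcast along the innermost dimension.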
        %20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%13, %14 : tensor<2048x176xf16>, tensor<2048x176x32xi8>) outs(%19 : tensor<2048x176x32xf16>) {
        ^bb0(%in: f16, %in_0: i8, %out: f16):
          %24 = arith.extsi %in_0 : i8 to i32
          %25 = arith.sitofp %24 : i32 to f16
          %26 = arith.mulf %25, %in : f16
          linalg.yield %26 : f16
        } -> tensor<2048x176x32xf16>
        %21 = linalg.fill ins(%cst : f32) outs(%18 : tensor<?x2048xf32>) -> tensor<?x2048xf32>
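        // Matmul-like contraction: for each (D, 2048) output element, reduce over the 176x32 inner
        // dimensions, accumulating in f32 into the zero-filled init.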
        %22 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>], iterator_types = ["parallel", "parallel", "reduction", "reduction"]} ins(%15, %20 : tensor<?x176x32xf16>, tensor<2048x176x32xf16>) outs(%21 : tensor<?x2048xf32>) {
        ^bb0(%in: f16, %in_0: f16, %out: f32):
          %24 = arith.mulf %in, %in_0 : f16
          %25 = arith.extf %24 : f16 to f32
          %26 = arith.addf %25, %out : f32
          linalg.yield %26 : f32
        } -> tensor<?x2048xf32>
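        // Epilogue: truncate the f32 accumulator to f16 and add the Dx2048 f16 input %16.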
        %23 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%16, %22 : tensor<?x2048xf16>, tensor<?x2048xf32>) outs(%17 : tensor<?x2048xf16>) {
        ^bb0(%in: f16, %in_0: f32, %out: f16):
          %24 = arith.truncf %in_0 : f32 to f16
          %25 = arith.addf %in, %24 : f16
          linalg.yield %25 : f16
        } -> tensor<?x2048xf16>
        flow.dispatch.tensor.store %23, %12, offsets = [0, 0], sizes = [%9, 2048], strides = [1, 1] : tensor<?x2048xf16> -> !flow.dispatch.tensor<writeonly:tensor<?x2048xf16>>{%9}
        return
      }
    }
  }
}