Skip to content

Instantly share code, notes, and snippets.

@pashu123
Created October 1, 2024 14:36
Show Gist options
  • Save pashu123/e2d83896b9d5338b4fb8a7260e6ceca1 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
// Workgroup-count regions of stream.executable.export must terminate with
// stream.return (as @simple_mul above does); hal.return is the terminator for
// HAL-dialect executables and fails the stream verifier here.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
func.func @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
return %2 : tensor<?xf32>
}
}
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%0 = affine.apply #map()[%arg0]
%c1 = arith.constant 1 : index
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// stream.executable.export regions require a stream.return terminator
// (matching @simple_mul above); hal.return belongs to hal.executable ops.
stream.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = util.call @_mixed_invocation(%1, %3) : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%c0 = arith.constant 0 : index
%dim = tensor.dim %4, %c0 : tensor<?xf32>
%5 = hal.tensor.export %4 "output0" : tensor<?xf32>{%dim} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
util.func private @_mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_mixed_invocation(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%dim = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = flow.dispatch @executable::@simple_mul[%dim](%arg0, %arg1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> tensor<?xf32>{%dim}
%1 = arith.addf %0, %arg1 : tensor<?xf32>
%2 = flow.dispatch @executable::@simple_mul_inplace[%dim](%0, %1, %dim) : (tensor<?xf32>{%dim}, tensor<?xf32>{%dim}, index) -> %1{%dim}
util.return %2 : tensor<?xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = util.call @_mixed_invocation(%1, %3) : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%dim = tensor.dim %4, %c0 : tensor<?xf32>
%5 = hal.tensor.export %4 "output0" : tensor<?xf32>{%dim} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {hal.device.targets = [#device_target_local]} {
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmulPass (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = arith.addf %4, %3 : tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%4 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
linalg.yield %8 : f32
} -> tensor<?xf32>
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%dim = tensor.dim %4, %c0 : tensor<?xf32>
%5 = tensor.empty(%dim) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = flow.dispatch.region -> (tensor<?xf32>{%0}) {
%9 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%5 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%10 = arith.addf %in, %in_0 : f32
linalg.yield %10 : f32
} -> tensor<?xf32>
flow.return %9 : tensor<?xf32>
}
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = flow.dispatch.region -> (tensor<?xf32>{%0}) {
%9 = tensor.empty(%0) : tensor<?xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%9 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<?xf32>
flow.return %10 : tensor<?xf32>
}
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = flow.dispatch.region -> (tensor<?xf32>{%0}) {
%9 = tensor.empty(%0) : tensor<?xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%4, %3 : tensor<?xf32>, tensor<?xf32>) outs(%9 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<?xf32>
flow.return %10 : tensor<?xf32>
}
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = tensor.empty(%0) : tensor<?xf32>
%6 = flow.dispatch.workgroups(%0, %4, %3, %0, %2, %0) : (index, tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index, index) -> tensor<?xf32>{%0} =
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg5: index, %arg6: index, %arg7: index, %arg8: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%9 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [%arg7], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg7} -> tensor<?xf32>
%10 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [%arg6], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg6} -> tensor<?xf32>
%11 = tensor.empty(%arg7) : tensor<?xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%9, %10 : tensor<?xf32>, tensor<?xf32>) outs(%11 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%13 = arith.addf %in, %in_0 : f32
linalg.yield %13 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %12, %arg8, offsets = [0], sizes = [%arg7], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%arg7}
flow.return
}
%7 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %6, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %6{%0}
%8 = hal.tensor.export %7 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups(%0, %4, %3, %0, %2, %0) : (index, tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index, index) -> tensor<?xf32>{%0} =
(%arg2: index, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg5: index, %arg6: index, %arg7: index, %arg8: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [%arg7], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg7} -> tensor<?xf32>
%9 = flow.dispatch.tensor.load %arg4, offsets = [0], sizes = [%arg6], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg6} -> tensor<?xf32>
%10 = tensor.empty(%arg7) : tensor<?xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%8, %9 : tensor<?xf32>, tensor<?xf32>) outs(%10 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
linalg.yield %12 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %11, %arg8, offsets = [0], sizes = [%arg7], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%arg7}
flow.return
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups(%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [%arg5], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg5} -> tensor<?xf32>
%9 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [%arg4], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg4} -> tensor<?xf32>
%10 = tensor.empty(%arg5) : tensor<?xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%8, %9 : tensor<?xf32>, tensor<?xf32>) outs(%10 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
linalg.yield %12 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0], sizes = [%arg5], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%arg5}
flow.return
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%11 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%12 = tensor.empty(%9) : tensor<?xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%10, %11 : tensor<?xf32>, tensor<?xf32>) outs(%12 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%14 = arith.addf %in, %in_0 : f32
linalg.yield %14 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tensor.load %arg2, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%11 = flow.dispatch.tensor.load %arg3, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%12 = tensor.empty(%9) : tensor<?xf32>
%13 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%10, %11 : tensor<?xf32>, tensor<?xf32>) outs(%12 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%14 = arith.addf %in, %in_0 : f32
linalg.yield %14 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %13, %arg6, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg5}
%9 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%arg4}
%10 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%arg5}
%11 = flow.dispatch.workload.ordinal %arg4, 0 : index
%12 = flow.dispatch.workload.ordinal %arg5, 1 : index
%13 = flow.dispatch.tensor.load %8, offsets = [0], sizes = [%12], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%12} -> tensor<?xf32>
%14 = flow.dispatch.tensor.load %9, offsets = [0], sizes = [%11], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%11} -> tensor<?xf32>
%15 = tensor.empty(%12) : tensor<?xf32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%13, %14 : tensor<?xf32>, tensor<?xf32>) outs(%15 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%17 = arith.addf %in, %in_0 : f32
linalg.yield %17 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %16, %10, offsets = [0], sizes = [%12], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%12}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9}
%11 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8}
%12 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
%13 = flow.dispatch.tensor.load %10, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%14 = flow.dispatch.tensor.load %11, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%15 = tensor.empty(%9) : tensor<?xf32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%13, %14 : tensor<?xf32>, tensor<?xf32>) outs(%15 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%17 = arith.addf %in, %in_0 : f32
linalg.yield %17 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %16, %12, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9}
%11 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8}
%12 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
%13 = flow.dispatch.tensor.load %10, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%14 = flow.dispatch.tensor.load %11, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%15 = tensor.empty(%9) : tensor<?xf32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%13, %14 : tensor<?xf32>, tensor<?xf32>) outs(%15 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%17 = arith.addf %in, %in_0 : f32
linalg.yield %17 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %16, %12, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9}
%11 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8}
%12 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
%13 = flow.dispatch.tensor.load %10, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%14 = flow.dispatch.tensor.load %11, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%15 = tensor.empty(%9) : tensor<?xf32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%13, %14 : tensor<?xf32>, tensor<?xf32>) outs(%15 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%17 = arith.addf %in, %in_0 : f32
linalg.yield %17 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %16, %12, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch.workgroups[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0} =
(%arg2: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg4: index, %arg5: index, %arg6: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%8 = flow.dispatch.workload.ordinal %arg4, 0 : index
%9 = flow.dispatch.workload.ordinal %arg5, 1 : index
%10 = flow.dispatch.tie_shape %arg2 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9}
%11 = flow.dispatch.tie_shape %arg3 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8}
%12 = flow.dispatch.tie_shape %arg6 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
%13 = flow.dispatch.tensor.load %10, offsets = [0], sizes = [%9], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%9} -> tensor<?xf32>
%14 = flow.dispatch.tensor.load %11, offsets = [0], sizes = [%8], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%8} -> tensor<?xf32>
%15 = tensor.empty(%9) : tensor<?xf32>
%16 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%13, %14 : tensor<?xf32>, tensor<?xf32>) outs(%15 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%17 = arith.addf %in, %in_0 : f32
linalg.yield %17 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %16, %12, offsets = [0], sizes = [%9], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%9}
flow.return
} count(%arg2: index, %arg3: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg2, %arg3
flow.return %x, %y, %z : index, index, index
}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
flow.executable private @mixed_invocation_dispatch_0 {
flow.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<?xf32>>, %arg2: index, %arg3: index, %arg4: !flow.dispatch.tensor<writeonly:tensor<?xf32>>) {
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = flow.dispatch.tie_shape %arg0 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = flow.dispatch.tie_shape %arg1 : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = flow.dispatch.tie_shape %arg4 : !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%1 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<?xf32>{%0}
%2 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%3 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<?xf32>{%2}
%4 = flow.dispatch @executable::@simple_mul[%0](%1, %3, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> tensor<?xf32>{%0}
%5 = flow.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%2, %0](%4, %3, %2, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%2}, index, index) -> tensor<?xf32>{%0}
%6 = flow.dispatch @executable::@simple_mul_inplace[%0](%4, %5, %0) : (tensor<?xf32>{%0}, tensor<?xf32>{%0}, index) -> %5{%0}
%7 = hal.tensor.export %6 "output0" : tensor<?xf32>{%0} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%c0 = arith.constant 0 : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%8}
%c0_2 = arith.constant 0 : index
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%9[%c0_2 to %8 for %8], %7[%c0_2 to %5 for %5], %4, %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%10}
%c0_3 = arith.constant 0 : index
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%9[%c0_3 to %8 for %8], %11[%c0_3 to %10 for %10], %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%10}, index) -> %11{%10}
%13 = stream.async.transfer %12 : !stream.resource<*>{%10} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%10}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?xf32>{%0} in !stream.resource<external>{%10} -> !hal.buffer_view
util.return %14 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%c0 = arith.constant 0 : index
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%8}
%c0_2 = arith.constant 0 : index
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%9[%c0_2 to %8 for %8], %7[%c0_2 to %5 for %5], %4, %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%10}
%c0_3 = arith.constant 0 : index
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%9[%c0_3 to %8 for %8], %11[%c0_3 to %10 for %10], %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%10}, index) -> %11{%10}
%13 = stream.async.transfer %12 : !stream.resource<*>{%10} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%10}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?xf32>{%0} in !stream.resource<external>{%10} -> !hal.buffer_view
util.return %14 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%8}
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%9[%c0 to %8 for %8], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%10}
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%10}, index) -> %11{%10}
%13 = stream.async.transfer %12 : !stream.resource<*>{%10} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%10}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?xf32>{%0} in !stream.resource<external>{%10} -> !hal.buffer_view
util.return %14 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%8}
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%9[%c0 to %8 for %8], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%10}
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%10}, index) -> %11{%10}
%13 = stream.async.transfer %12 : !stream.resource<*>{%10} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%10}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?xf32>{%0} in !stream.resource<external>{%10} -> !hal.buffer_view
util.return %14 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%8}
%10 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%11 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%9[%c0 to %8 for %8], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%10}
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%9[%c0 to %8 for %8], %11[%c0 to %10 for %10], %0) : (!stream.resource<*>{%8}, !stream.resource<*>{%10}, index) -> %11{%10}
%13 = stream.async.transfer %12 : !stream.resource<*>{%10} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%10}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<?xf32>{%0} in !stream.resource<external>{%10} -> !hal.buffer_view
util.return %14 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot of the whole module as recorded after the IPO (iree-util-ipo) pass.
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  // Hand-authored executable whose kernel bodies come from precompiled .o
  // objects (one per target architecture) rather than from codegen.
  stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
    // Workgroup count region: ceildiv(workload, 64) along x; y and z are 1.
    stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
      %c1 = arith.constant 1 : index
      %0 = affine.apply #map()[%arg0]
      stream.return %0, %c1, %c1 : index, index, index
    }
    stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
      %c1 = arith.constant 1 : index
      %0 = affine.apply #map()[%arg0]
      // NOTE(review): this export terminates with hal.return while the sibling
      // @simple_mul uses stream.return — presumably carried over from the input
      // sample; confirm the mixed terminator is intentional.
      hal.return %0, %c1, %c1 : index, index, index
    }
    builtin.module {
      // External kernel entry point resolved from the linked objects above.
      func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
      func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
        %c0 = arith.constant 0 : index
        %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
        // Per-workgroup element offset: workgroup_id * 64 (#map1).
        %0 = affine.apply #map1()[%workgroup_id_0]
        %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        %3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
        return
      }
      func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
      // In-place variant: only two bindings; the external kernel receives both.
      func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
        %c0 = arith.constant 0 : index
        %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
        %0 = affine.apply #map1()[%workgroup_id_0]
        %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
        %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
        call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
        return
      }
    }
  }
  // Compiler-generated dispatch sitting between the two custom-kernel calls.
  stream.executable private @mixed_invocation_dispatch_0 {
    stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = flow.dispatch.workload.ordinal %arg2, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg3, 1 : index
        %2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
        %3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
        %4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
        %5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
        %6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
        %7 = tensor.empty(%1) : tensor<?xf32>
        // Elementwise f32 addition (arith.addf) over the dynamic 1-D tensors.
        %8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %9 = arith.addf %in, %in_0 : f32
          linalg.yield %9 : f32
        } -> tensor<?xf32>
        flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
        return
      }
    }
  }
  // Public entry point: @simple_mul -> elementwise-add dispatch ->
  // @simple_mul_inplace; the final dispatch writes back into %9 in place
  // (tied result "-> %9{%1}").
  util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
    %c0 = arith.constant 0 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
    %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
    %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
    %5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
    %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
    %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
    %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
    %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
    %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
    %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
    %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
    util.return %12 : !hal.buffer_view
  }
}
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after CombineInitializers (iree-util-combine-initializers); the
// module is textually unchanged from the preceding IPO dump.
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  // Hand-authored executable backed by per-architecture .o objects.
  stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
    // Workgroup count region: ceildiv(workload, 64) along x; y and z are 1.
    stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
      %c1 = arith.constant 1 : index
      %0 = affine.apply #map()[%arg0]
      stream.return %0, %c1, %c1 : index, index, index
    }
    stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
      %c1 = arith.constant 1 : index
      %0 = affine.apply #map()[%arg0]
      // NOTE(review): hal.return here vs. stream.return in @simple_mul above —
      // confirm this mixed terminator is intentional in the input sample.
      hal.return %0, %c1, %c1 : index, index, index
    }
    builtin.module {
      // External kernel entry points resolved from the linked objects.
      func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
      func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
        %c0 = arith.constant 0 : index
        %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
        // Per-workgroup element offset: workgroup_id * 64 (#map1).
        %0 = affine.apply #map1()[%workgroup_id_0]
        %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        %3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
        call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
        return
      }
      func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
      func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
        %c0 = arith.constant 0 : index
        %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
        %0 = affine.apply #map1()[%workgroup_id_0]
        %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
        %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
        call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
        return
      }
    }
  }
  // Compiler-generated elementwise-add dispatch.
  stream.executable private @mixed_invocation_dispatch_0 {
    stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
        %c0 = arith.constant 0 : index
        %0 = flow.dispatch.workload.ordinal %arg2, 0 : index
        %1 = flow.dispatch.workload.ordinal %arg3, 1 : index
        %2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
        %3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
        %4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
        %5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
        %6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
        %7 = tensor.empty(%1) : tensor<?xf32>
        // Elementwise f32 addition (arith.addf) over the dynamic 1-D tensors.
        %8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %9 = arith.addf %in, %in_0 : f32
          linalg.yield %9 : f32
        } -> tensor<?xf32>
        flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
        return
      }
    }
  }
  // Public entry point: @simple_mul -> elementwise-add -> @simple_mul_inplace;
  // the last dispatch is tied in place to %9 ("-> %9{%1}").
  util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
    %c0 = arith.constant 0 : index
    %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%0} : index
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
    %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
    %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
    %5 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<?xf32>{%4} : index
    %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
    %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
    %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
    %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
    %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
    %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
    %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
    util.return %12 : !hal.buffer_view
  }
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
// Per-executable snapshot after EncodeDeviceTensorsPass; attribute aliases are
// now expanded inline (affine_map<(d0) -> (d0)> instead of #map2).
stream.executable private @mixed_invocation_dispatch_0 {
  // Workgroup count derived from the workload slice (%arg0, %arg1).
  stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
    stream.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
      %c0 = arith.constant 0 : index
      %0 = flow.dispatch.workload.ordinal %arg2, 0 : index
      %1 = flow.dispatch.workload.ordinal %arg3, 1 : index
      %2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
      %3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
      %4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
      %5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
      %6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
      %7 = tensor.empty(%1) : tensor<?xf32>
      // Elementwise f32 addition (arith.addf) over the dynamic 1-D tensors.
      %8 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
      ^bb0(%in: f32, %in_0: f32, %out: f32):
        %9 = arith.addf %in, %in_0 : f32
        linalg.yield %9 : f32
      } -> tensor<?xf32>
      flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
      return
    }
  }
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
// Per-executable snapshot after EncodeDeviceTensorsPass; target attributes are
// expanded inline. Kernel bodies live in the referenced .o objects.
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}> = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}> = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
  // Workgroup count: ceildiv(workload, 64) along x; y and z are 1.
  stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
    %c1 = arith.constant 1 : index
    %0 = affine.apply affine_map<()[s0] -> (s0 ceildiv 64)>()[%arg0]
    stream.return %0, %c1, %c1 : index, index, index
  }
  stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
    %c1 = arith.constant 1 : index
    %0 = affine.apply affine_map<()[s0] -> (s0 ceildiv 64)>()[%arg0]
    // NOTE(review): hal.return here vs. stream.return in @simple_mul above —
    // confirm this mixed terminator is intentional in the input sample.
    hal.return %0, %c1, %c1 : index, index, index
  }
  builtin.module {
    // External kernel entry points resolved from the linked objects.
    func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
    func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
      %c0 = arith.constant 0 : index
      %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
      // Per-workgroup element offset: workgroup_id * 64.
      %0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
      %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
      %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
      %3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
      call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
      return
    }
    func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
    func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
      %c0 = arith.constant 0 : index
      %workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
      %0 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_0]
      %1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
      %2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
      call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
      return
    }
  }
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
// Snapshot after EncodeHostTensorsPass: stream.tensor.sizeof has been lowered
// to explicit byte arithmetic (element count * 4 bytes for f32, see %c4).
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
  // Byte size of input0: dim0 * sizeof(f32).
  %1 = arith.muli %0, %c4 : index
  %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
  %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
  %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
  %5 = arith.muli %4, %c4 : index
  %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
  %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
  // Dispatch chain: @simple_mul -> elementwise add -> @simple_mul_inplace,
  // where the last dispatch is tied in place to %9 ("-> %9{%1}").
  %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
  %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
  %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
  %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
  %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
  util.return %12 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// Snapshot after Canonicalizer; no change relative to the EncodeHostTensors
// dump above (the pass found nothing to fold here).
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
  // Byte size of input0: dim0 * sizeof(f32).
  %1 = arith.muli %0, %c4 : index
  %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
  %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
  %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
  %5 = arith.muli %4, %c4 : index
  %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
  %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
  // Dispatch chain: @simple_mul -> elementwise add -> @simple_mul_inplace
  // (tied in place to %9).
  %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
  %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
  %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
  %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
  %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
  util.return %12 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
// Snapshot after CSE; textually identical to the preceding Canonicalizer dump
// (no duplicate subexpressions to eliminate).
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
  // Byte size of input0: dim0 * sizeof(f32).
  %1 = arith.muli %0, %c4 : index
  %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
  %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
  %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
  %5 = arith.muli %4, %c4 : index
  %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
  %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
  // Dispatch chain: @simple_mul -> elementwise add -> @simple_mul_inplace
  // (tied in place to %9).
  %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
  %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
  %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
  %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
  %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
  util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// Snapshot after SimplifyGlobalAccesses; textually identical to the preceding
// CSE dump (the only global, @__device_0, is referenced via affinities only).
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
  // Byte size of input0: dim0 * sizeof(f32).
  %1 = arith.muli %0, %c4 : index
  %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
  %3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
  %4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
  %5 = arith.muli %4, %c4 : index
  %6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
  %7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
  // Dispatch chain: @simple_mul -> elementwise add -> @simple_mul_inplace
  // (tied in place to %9).
  %8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
  %9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
  %10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
  %11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
  %12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
  util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = stream.async.transfer %2 : !stream.resource<external>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%1}
%4 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%4]) type(%element_type_f32) encoding(%dense_row_major)
%5 = arith.muli %4, %c4 : index
%6 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%4} in !stream.resource<external>{%5}
%7 = stream.async.transfer %6 : !stream.resource<external>{%5} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%5}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%3[%c0 to %1 for %1], %7[%c0 to %5 for %5], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index) -> !stream.resource<*>{%1}
%9 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%4, %0](%8[%c0 to %1 for %1], %7[%c0 to %5 for %5], %4, %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%5}, index, index) -> !stream.resource<*>{%1}
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<*>{%1}, !stream.resource<*>{%1}, index) -> %9{%1}
%11 = stream.async.transfer %10 : !stream.resource<*>{%1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
// Attribute aliases shared by the module below: two llvm-cpu executable
// targets (arm64 + x86_64), workgroup-count map (ceildiv 64), per-workgroup
// offset map (* 64), and an identity map for the elementwise linalg op.
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
// Hand-authored executable: kernel bodies are supplied as precompiled
// object files per target (see the hal.executable.objects paths).
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
// Workgroup count: ceildiv(workload, 64) x 1 x 1 (#map).
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// NOTE(review): this export terminates with hal.return while @simple_mul
// above uses stream.return — presumably both terminators are accepted at
// this stage; confirm against the stream.executable.export verifier.
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
// External kernel symbol resolved from the linked object file
// (hal.import.static).
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
// Per-workgroup element offset: workgroup_id * 64 (#map1).
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
// In-place variant: binds two buffers instead of three.
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
// Code-generated dispatch: elementwise f32 addition over dynamic 1-D tensors.
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
// NOTE(review): lhs (%2) and output (%4) are sized by workload %1 while
// rhs (%3) is sized by %0; correctness presumes the two runtime lengths
// agree — verify against the caller.
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
// Elementwise add: out[i] = lhs[i] + rhs[i] (identity maps, parallel).
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
// Entry point: import both inputs, run the three-dispatch chain, export.
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
// Byte sizes: element count * 4 (f32).
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul[%0](%2[%c0 to %1 for %1], %5[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%6[%c0 to %1 for %1], %5[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
// In-place dispatch: result aliases %7 ("-> %7{%1}").
%8 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @executable::@simple_mul_inplace[%0](%6[%c0 to %1 for %1], %7[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %7{%1}
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %9 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
// After ScheduleExecutionPass: the three stream.async.dispatch ops are now
// grouped into a single stream.async.execute region that yields a timepoint;
// the host awaits that timepoint before exporting the result.
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
// Imported resources are captured into the region as %arg2/%arg3.
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
// Host-side wait on the execution timepoint before exporting.
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
// After ScheduleConcurrencyPass: identical to the previous dump — the three
// dispatches form a strict dependency chain (%8 feeds %9, both feed %10), so
// no stream.async.concurrent region was introduced.
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
// Full-module snapshot after PropagateTimepointsPass. Executables are
// unchanged from the earlier dumps; only @mixed_invocation differs, gaining
// explicit immediate timepoints joined and awaited by the execute op.
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
// Custom executable backed by precompiled per-target object files.
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// NOTE(review): hal.return here vs stream.return in @simple_mul above;
// presumably both are accepted at this stage — confirm.
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
// Per-workgroup element offset: workgroup_id * 64 (#map1).
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
// Code-generated elementwise-add dispatch (dynamic 1-D f32).
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
// NOTE(review): lhs/out sized by %1, rhs by %0 — presumes the two
// runtime lengths agree; verify against the caller.
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
// Timepoints propagated for the two imports: both are immediately
// available, so the execute op awaits a join of two immediate timepoints
// (the later Canonicalizer dump drops this await entirely).
%6 = stream.timepoint.immediate => !stream.timepoint
%7 = stream.timepoint.immediate => !stream.timepoint
%8 = stream.timepoint.join max(%6, %7) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%8) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%11 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%12 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%11[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%13 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%11[%c0 to %1 for %1], %12[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %12{%1}
stream.yield %13 : !stream.resource<external>{%1}
} => !stream.timepoint
%9 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%10 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %9 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
// Full-module snapshot after MaterializeBuiltinsPass — byte-identical to the
// PropagateTimepointsPass dump above (no builtin ops needed materializing).
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
// NOTE(review): hal.return here vs stream.return in @simple_mul above;
// presumably both are accepted at this stage — confirm.
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
// External kernel symbols resolved from the linked .o files.
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
// Elementwise add: out[i] = lhs[i] + rhs[i].
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
// Immediate timepoints for the already-available imports, joined and
// awaited by the execute op (removed by the later Canonicalizer run).
%6 = stream.timepoint.immediate => !stream.timepoint
%7 = stream.timepoint.immediate => !stream.timepoint
%8 = stream.timepoint.join max(%6, %7) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%8) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%11 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%12 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%11[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%13 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%11[%c0 to %1 for %1], %12[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %12{%1}
stream.yield %13 : !stream.resource<external>{%1}
} => !stream.timepoint
%9 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%10 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %9 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
// After Canonicalizer: the immediate/join timepoints from the previous dump
// have been folded away — the execute op no longer carries an await clause.
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
// After CSE: identical to the Canonicalizer dump above — no common
// subexpressions remained to eliminate in this function.
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}) -> !stream.resource<external>{%1} {
%8 = stream.async.dispatch @executable::@simple_mul[%0](%arg2[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %0) : (!stream.resource<external>{%1}, !stream.resource<external>{%4}, index) -> !stream.resource<transient>{%1}
%9 = stream.async.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%8[%c0 to %1 for %1], %arg3[%c0 to %4 for %4], %3, %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%4}, index, index) -> !stream.resource<external>{%1}
%10 = stream.async.dispatch @executable::@simple_mul_inplace[%0](%8[%c0 to %1 for %1], %9[%c0 to %1 for %1], %0) : (!stream.resource<transient>{%1}, !stream.resource<external>{%1}, index) -> %9{%1}
stream.yield %10 : !stream.resource<external>{%1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%1}
%7 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %6 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6:2 = stream.resource.pack on(#hal.device.affinity<@__device_0>) slices({
[0, 2] = %1
}) : index
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6#0} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_1 as %arg5: !stream.resource<transient>{%6#0}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_1 : !stream.resource<transient>{%6#0} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6:2 = stream.resource.pack on(#hal.device.affinity<@__device_0>) slices({
[0, 2] = %1
}) : index
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6#0} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_2) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_1 as %arg5: !stream.resource<transient>{%6#0}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%6#1 for %1] : !stream.resource<transient>{%6#0},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_1 : !stream.resource<transient>{%6#0} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%c0_1 = arith.constant 0 : index
%c64 = arith.constant 64 : index
%6 = util.align %1, %c64 : index
%7 = arith.addi %6, %c0_1 : index
%c64_2 = arith.constant 64 : index
%c64_3 = arith.constant 64 : index
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%7} => !stream.timepoint
%8 = stream.timepoint.join max(%result_timepoint, %result_timepoint_5) => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%8) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_4 as %arg5: !stream.resource<transient>{%7}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0_1 for %1] : !stream.resource<transient>{%7}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0_1 for %1] : !stream.resource<transient>{%7},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0_1 for %1] : !stream.resource<transient>{%7},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%10 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%9) => %result_4 : !stream.resource<transient>{%7} => !stream.timepoint
%11 = stream.timepoint.join max(%10, %9) => !stream.timepoint
%12 = stream.timepoint.await %11 => %result : !stream.resource<external>{%1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%c0_1 = arith.constant 0 : index
%c64 = arith.constant 64 : index
%6 = util.align %1, %c64 : index
%7 = arith.addi %6, %c0_1 : index
%c64_2 = arith.constant 64 : index
%c64_3 = arith.constant 64 : index
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%7} => !stream.timepoint
%8 = stream.timepoint.join max(%result_timepoint, %result_timepoint_5) => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%8) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_4 as %arg5: !stream.resource<transient>{%7}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0_1 for %1] : !stream.resource<transient>{%7}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0_1 for %1] : !stream.resource<transient>{%7},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0_1 for %1] : !stream.resource<transient>{%7},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%10 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%9) => %result_4 : !stream.resource<transient>{%7} => !stream.timepoint
%11 = stream.timepoint.join max(%10, %9) => !stream.timepoint
%12 = stream.timepoint.await %11 => %result : !stream.resource<external>{%1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.join max(%9, %8) => !stream.timepoint
%11 = stream.timepoint.await %10 => %result : !stream.resource<external>{%1}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 0 : index, iree.fixedpoint.modified, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.immediate => !stream.timepoint
%11 = stream.timepoint.join max(%9, %10) => !stream.timepoint
%12 = stream.timepoint.await %11 => %result : !stream.resource<external>{%1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 1 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 1 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 1 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 1 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {iree.fixedpoint.iteration = 1 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg3}
%3 = stream.binding.subspan %arg2[%c0] : !stream.binding -> memref<?xf32>{%arg3}
call @simple_mul_workgroup(%1, %2, %3, %arg3, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> memref<?xf32>{%arg2}
%2 = stream.binding.subspan %arg1[%c0] : !stream.binding -> memref<?xf32>{%arg2}
call @simple_mul_inplace_workgroup(%1, %2, %arg2, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg2, 0 : index
%1 = flow.dispatch.workload.ordinal %arg3, 1 : index
%2 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%0 : index) {
ro %arg2[%c0 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0 for %1] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%3, %0 : index, index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
ro %arg3[%c0 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%0 : index) {
ro %arg5[%c0 for %1] : !stream.resource<transient>{%6},
rw %arg4[%c0 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index, %arg6: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> memref<?xf32>{%arg6}
%2 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> memref<?xf32>{%arg6}
%3 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> memref<?xf32>{%arg6}
call @simple_mul_workgroup(%1, %2, %3, %arg6, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: index, %arg3: index, %arg4: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> memref<?xf32>{%arg4}
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> memref<?xf32>{%arg4}
call @simple_mul_inplace_workgroup(%1, %2, %arg4, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: index) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg6, 0 : index
%1 = flow.dispatch.workload.ordinal %arg7, 1 : index
%2 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%c0_2 = arith.constant 0 : index
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%c0, %c0, %c0, %0 : index, index, index, index) {
ro %arg2[%c0_2 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0_2 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0_2 for %6] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%c0, %c0, %c0, %3, %0 : index, index, index, index, index) {
ro %arg5[%c0_2 for %6] : !stream.resource<transient>{%6},
ro %arg3[%c0_2 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_2 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%c0, %c0, %0 : index, index, index) {
ro %arg5[%c0_2 for %6] : !stream.resource<transient>{%6},
rw %arg4[%c0_2 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}, %arg6: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> memref<?xf32>{%arg6}
%2 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> memref<?xf32>{%arg6}
%3 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> memref<?xf32>{%arg6}
call @simple_mul_workgroup(%1, %2, %3, %arg6, %0) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: index {stream.values = [0 : index]}, %arg3: index {stream.values = [0 : index]}, %arg4: index) {
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%0 = affine.apply #map1()[%workgroup_id_0]
%1 = stream.binding.subspan %arg0[%arg2] : !stream.binding -> memref<?xf32>{%arg4}
%2 = stream.binding.subspan %arg1[%arg3] : !stream.binding -> memref<?xf32>{%arg4}
call @simple_mul_inplace_workgroup(%1, %2, %arg4, %0) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}, %arg6: index, %arg7: index) {
%c0 = arith.constant 0 : index
%0 = flow.dispatch.workload.ordinal %arg6, 0 : index
%1 = flow.dispatch.workload.ordinal %arg7, 1 : index
%2 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1}
%3 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0}
%4 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
%5 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [%1], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%1} -> tensor<?xf32>
%6 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [%0], strides = [1] : !flow.dispatch.tensor<readonly:tensor<?xf32>>{%0} -> tensor<?xf32>
%7 = tensor.empty(%1) : tensor<?xf32>
%8 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]} ins(%5, %6 : tensor<?xf32>, tensor<?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>
flow.dispatch.tensor.store %8, %4, offsets = [0], sizes = [%1], strides = [1] : tensor<?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?xf32>>{%1}
return
}
}
}
util.func public @mixed_invocation(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @mixed_invocation(%input0: tensor<?xf32>, %input1: tensor<?xf32>) -> (%output0: tensor<?xf32>)"}} {
%c64 = arith.constant 64 : index
%c4 = arith.constant 4 : index
%c0 = arith.constant 0 : index
%0 = hal.buffer_view.dim<%arg0 : !hal.buffer_view>[0] : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%0]) type(%element_type_f32) encoding(%dense_row_major)
%1 = arith.muli %0, %c4 : index
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<?xf32>{%0} in !stream.resource<external>{%1}
%3 = hal.buffer_view.dim<%arg1 : !hal.buffer_view>[0] : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%3]) type(%element_type_f32) encoding(%dense_row_major)
%4 = arith.muli %3, %c4 : index
%5 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<?xf32>{%3} in !stream.resource<external>{%4}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%1} => !stream.timepoint
%6 = util.align %1, %c64 : index
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%6} => !stream.timepoint
%7 = stream.timepoint.join max(%result_timepoint, %result_timepoint_1) => !stream.timepoint
%c0_2 = arith.constant 0 : index
%8 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%7) => with(%2 as %arg2: !stream.resource<external>{%1}, %5 as %arg3: !stream.resource<external>{%4}, %result as %arg4: !stream.resource<external>{%1}, %result_0 as %arg5: !stream.resource<transient>{%6}) {
stream.cmd.dispatch @executable::@simple_mul[%0](%c0, %c0, %c0, %0 : index, index, index, index) {
ro %arg2[%c0_2 for %1] : !stream.resource<external>{%1},
ro %arg3[%c0_2 for %4] : !stream.resource<external>{%4},
wo %arg5[%c0_2 for %6] : !stream.resource<transient>{%6}
}
stream.cmd.dispatch @mixed_invocation_dispatch_0::@mixed_invocation_dispatch_0_elementwise_D_f32[%3, %0](%c0, %c0, %c0, %3, %0 : index, index, index, index, index) {
ro %arg5[%c0_2 for %6] : !stream.resource<transient>{%6},
ro %arg3[%c0_2 for %4] : !stream.resource<external>{%4},
wo %arg4[%c0_2 for %1] : !stream.resource<external>{%1}
}
stream.cmd.dispatch @executable::@simple_mul_inplace[%0](%c0, %c0, %0 : index, index, index) {
ro %arg5[%c0_2 for %6] : !stream.resource<transient>{%6},
rw %arg4[%c0_2 for %1] : !stream.resource<external>{%1}
}
} => !stream.timepoint
%9 = stream.resource.dealloca on(#hal.device.affinity<@__device_0>) await(%8) => %result_0 : !stream.resource<transient>{%6} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result : !stream.resource<external>{%1}
%11 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %10 : tensor<?xf32>{%0} in !stream.resource<external>{%1} -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_embedded_elf_arm_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-arm_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 16 : index, target_triple = "aarch64-none-elf"}>
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", native_vector_size = 32 : index, target_triple = "x86_64-none-elf"}>
#map = affine_map<()[s0] -> (s0 ceildiv 64)>
#map1 = affine_map<()[s0] -> (s0 * 64)>
#map2 = affine_map<(d0) -> (d0)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_arm_64_, #executable_target_embedded_elf_x86_64_]> : !hal.device
module @example attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @executable attributes {hal.executable.objects = #hal.executable.objects<{#executable_target_embedded_elf_arm_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_arm_64.o"}>], #executable_target_embedded_elf_x86_64_ = [#hal.executable.object<{path = "samples/custom_dispatch/cpu/embedded/functions_x86_64.o"}>]}>} {
stream.executable.export public @simple_mul workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
stream.return %0, %c1, %c1 : index, index, index
}
stream.executable.export public @simple_mul_inplace workgroups(%arg0: index) -> (index, index, index) {
%c1 = arith.constant 1 : index
%0 = affine.apply #map()[%arg0]
hal.return %0, %c1, %c1 : index, index, index
}
builtin.module {
func.func private @simple_mul_workgroup(memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32) {
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg9 : i32 to i64
%16 = arith.extui %arg10 : i32 to i64
%c32_i64_2 = arith.constant 32 : i64
%17 = arith.shli %16, %c32_i64_2 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 : i64 to index
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%20 = affine.apply #map1()[%workgroup_id_0]
%21 = stream.binding.subspan %arg0[%4] : !stream.binding -> memref<?xf32>{%19}
%22 = stream.binding.subspan %arg1[%9] : !stream.binding -> memref<?xf32>{%19}
%23 = stream.binding.subspan %arg2[%14] : !stream.binding -> memref<?xf32>{%19}
call @simple_mul_workgroup(%21, %22, %23, %19, %20) : (memref<?xf32>, memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
func.func private @simple_mul_inplace_workgroup(memref<?xf32>, memref<?xf32>, index, index) attributes {hal.import.static}
func.func @simple_mul_inplace(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32) {
%0 = arith.extui %arg2 : i32 to i64
%1 = arith.extui %arg3 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg4 : i32 to i64
%6 = arith.extui %arg5 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.extui %arg7 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 : i64 to index
%c0 = arith.constant 0 : index
%workgroup_id_0 = stream.dispatch.workgroup.id[0] : index
%15 = affine.apply #map1()[%workgroup_id_0]
%16 = stream.binding.subspan %arg0[%4] : !stream.binding -> memref<?xf32>{%14}
%17 = stream.binding.subspan %arg1[%9] : !stream.binding -> memref<?xf32>{%14}
call @simple_mul_inplace_workgroup(%16, %17, %14, %15) : (memref<?xf32>, memref<?xf32>, index, index) -> ()
return
}
}
}
stream.executable private @mixed_invocation_dispatch_0 {
stream.executable.export public @mixed_invocation_dispatch_0_elementwise_D_f32 workgroups(%arg0: index, %arg1: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice %arg0, %arg1
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @mixed_invocation_dispatch_0_elementwise_D_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignme
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment