@pashu123
Created October 3, 2024 15:48
This file has been truncated.
// -----// IR Dump After CheckVHLOStableHloMixUsage (iree-check-vhlostablehlo-mix-usage) //----- //
module {
func.func @compare_i64() {
%0 = util.unfoldable_constant dense<1> : tensor<i64>
%1 = util.unfoldable_constant dense<5> : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.unfoldable_constant dense<0> : tensor<i8>
%4 = util.unfoldable_constant dense<1> : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq_const(%5, dense<0> : tensor<i8>) : tensor<i8>
return
}
}
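// Note: the program under test compares two rank-0 i64 tensors holding 1 and
// 5, then selects between i8 constants 1 and 0 on the result; since 1 != 5,
// the select yields 0 and check.expect_eq_const asserts that. The
// util.unfoldable_constant ops keep the inputs opaque so the whole check is
// not constant-folded away. A per-pass dump like this is normally obtained by
// enabling IR printing on the compiler; the invocation below is an assumption
// (file name hypothetical, flag spellings vary across IREE versions):
//   iree-compile --iree-hal-target-backends=llvm-cpu \
//     --mlir-print-ir-after-all compare_i64.mlir -o /dev/null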
// -----// IR Dump After VhloToVersionPass (vhlo-to-version) //----- //
module {
func.func @compare_i64() {
%0 = util.unfoldable_constant dense<1> : tensor<i64>
%1 = util.unfoldable_constant dense<5> : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.unfoldable_constant dense<0> : tensor<i8>
%4 = util.unfoldable_constant dense<1> : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq_const(%5, dense<0> : tensor<i8>) : tensor<i8>
return
}
}
// -----// IR Dump After VhloLegalizeToStablehloPass (vhlo-legalize-to-stablehlo) //----- //
module {
func.func @compare_i64() {
%0 = util.unfoldable_constant dense<1> : tensor<i64>
%1 = util.unfoldable_constant dense<5> : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.unfoldable_constant dense<0> : tensor<i8>
%4 = util.unfoldable_constant dense<1> : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq_const(%5, dense<0> : tensor<i8>) : tensor<i8>
return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
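// Note: this canonicalization expands every util.unfoldable_constant into an
// arith.constant feeding a util.optimization_barrier, and rewrites
// check.expect_eq_const into check.expect_eq against the materialized
// constant. Minimal sketch of the rewrite (names are illustrative only):
//   %v = util.unfoldable_constant dense<1> : tensor<i64>
// becomes
//   %cst = arith.constant dense<1> : tensor<i64>
//   %v   = util.optimization_barrier %cst : tensor<i64>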
// -----// IR Dump After StableHLOCanonicalize (iree-stablehlo-canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After LegalizeStableHLOCustomCalls (iree-stablehlo-legalize-custom-calls) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After LegalizeControlFlow (iree-stablehlo-legalize-control-flow) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After FlattenTuplesInSCF (iree-stablehlo-preprocessing-flatten-scf-tuples) //----- //
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After StableHLOToStableHLOPreprocessing (iree-stablehlo-to-stablehlo-preprocessing) //----- //
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After StableHLOCanonicalize (iree-stablehlo-canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After ShapeToShapeLowering (shape-to-shape-lowering) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After ConvertShapeToStandard (convert-shape-to-std) //----- //
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After StableHLOCanonicalize (iree-stablehlo-canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After Inliner (inline) //----- //
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After StableHLOCanonicalize (iree-stablehlo-canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After CSE (cse) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After LegalizeShapeComputations (iree-stablehlo-legalize-shape-computations) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After ConvertStableHloToLinalgExt (iree-stablehlo-to-linalg-ext) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After LegalizeChlo (iree-stablehlo-legalize-chlo) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = stablehlo.compare EQ, %0, %1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
%3 = util.optimization_barrier %cst_0 : tensor<i8>
%4 = util.optimization_barrier %cst : tensor<i8>
%5 = stablehlo.select %2, %4, %3 : tensor<i1>, tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After ConvertStableHloToIreeInputDialects (iree-stablehlo-to-iree-input) //----- //
#map = affine_map<() -> ()>
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
}
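// Note: iree-stablehlo-to-iree-input lowers the remaining stablehlo ops onto
// linalg.generic over rank-0 tensors, with plain arith ops in the region
// bodies. Stripped of the tensor wrappers, the per-element computation is:
//   %eq  = arith.cmpi eq, %lhs, %rhs : i64     // from stablehlo.compare EQ
//   %sel = arith.select %eq, %one, %zero : i8  // from stablehlo.select
// (%lhs, %rhs, %one, %zero are illustrative names, not SSA values from this
// dump.)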
// -----// IR Dump After ReconcileUnrealizedCasts (reconcile-unrealized-casts) //----- //
#map = affine_map<() -> ()>
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After StableHLOCanonicalize (iree-stablehlo-canonicalize) //----- //
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
// -----// IR Dump After VerifyCompilerStableHloInputLegality (iree-stablehlo-verify-compiler-input-legality) //----- //
#map = affine_map<() -> ()>
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
#map = affine_map<() -> ()>
module {
func.func @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
return
}
}
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
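// Note: iree-import-public moves the public MLIR entry points into IREE's
// util dialect: func.func and its return become util.func and util.return
// while the body is left untouched, e.g. (sketch of the structural change):
//   func.func @compare_i64() { ... return }
// becomes
//   util.func public @compare_i64() { ... util.return }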
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
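// Note: iree-abi-wrap-entry-points splits each exported function into a thin
// public ABI stub plus a private implementation: the stub carries
// iree.abi.stub and iree.reflection metadata and simply forwards to the
// renamed body, as seen above:
//   util.func public @compare_i64() attributes {iree.abi.stub, ...} {
//     util.call @_compare_i64() : () -> ()
//     util.return
//   }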
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Inliner (inline) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#map = affine_map<() -> ()>
module {
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {hal.device.targets = [#device_target_local]} {
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
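// Note: iree-hal-assign-legacy-target-devices attaches the compilation target
// as module attributes: an #hal.executable.target for llvm-cpu
// (embedded-elf-x86_64, generic CPU, x86_64-unknown-unknown-eabi-elf) wrapped
// in a #hal.device.target<"local">. That is consistent with a CPU build
// selected roughly as follows (assumed flag, spelling may differ by IREE
// version):
//   --iree-hal-target-backends=llvm-cpu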
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
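// Note: from this point the global-optimization passes (iree-global-opt-*)
// run per function, so each pass prints two dumps: one for the public stub
// @compare_i64 and one for the private body @_compare_i64. None of the
// quantized-conv/quantized-matmul or 1x1-conv-to-matmul rewrites match this
// element-wise program, so the IR passes through unchanged.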
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmulPass (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After Convert1X1FilterConv2DToMatmulPass (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
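// First dump at module scope: #executable_target_embedded_elf_x86_64_ targets the llvm-cpu backend producing an embedded x86_64 ELF, #device_target_local binds that target to the local device through the @__device_0 global, and #map = affine_map<() -> ()> is the shared zero-rank indexing map used by both generics. The public @compare_i64 ABI stub simply calls the private @_compare_i64 body.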
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
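// Every tensor in this test is rank 0, so there are no unit-extent dimensions to fold and this pass leaves the IR unchanged.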
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CPUMaterializeHostEncodingPass (iree-codegen-cpu-materialize-host-encoding) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
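// SetEncoding attached no encodings earlier (there are no contraction ops to pack), so host-side encoding materialization has nothing to rewrite and the module comes through unchanged.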
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
}
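// Editorial note (annotation, not pass output): the module attribute iree.fixedpoint.iteration = 0
// carried by the ApplyPatterns, FoldGlobals, FuseGlobals, and IPO dumps above is absent here, which
// I read as iree-util-fixed-point-iterator having reached a fixed point after a single iteration of
// that pass set and then dropping its bookkeeping attribute; the IR body itself is unchanged.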
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = tensor.empty() : tensor<i1>
%3 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1 : tensor<i64>, tensor<i64>) outs(%2 : tensor<i1>) {
^bb0(%in: i64, %in_3: i64, %out: i1):
%8 = arith.cmpi eq, %in, %in_3 : i64
linalg.yield %8 : i1
} -> tensor<i1>
%4 = util.optimization_barrier %cst_0 : tensor<i8>
%5 = util.optimization_barrier %cst : tensor<i8>
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%3, %5, %4 : tensor<i1>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i1, %in_3: i8, %in_4: i8, %out: i8):
%8 = arith.select %in, %in_3, %in_4 : i8
linalg.yield %8 : i8
} -> tensor<i8>
check.expect_eq(%7, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
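// Editorial note (annotation, not pass output): this is the first structural change to the body of
// @_compare_i64 in this stretch of the log. iree-dispatch-creation-elementwise-op-fusion has folded
// the separate compare and select generics into a single rank-0 linalg.generic with four inputs, and
// the intermediate tensor<i1> (along with its tensor.empty) is gone. The util.optimization_barrier
// ops are what keep the constants from being folded through the kernel at compile time, so the fused
// dispatch still runs when the test executes. A comment-only scalar trace of the fused region with
// this test's operands:
//
//   %6 = cmpi eq, 1, 5  -> false
//   %7 = select false, 1, 0  -> 0
//
// so the result is dense<0> : tensor<i8>, matching check.expect_eq(%5, %cst_0).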
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_3 : i64
%7 = arith.select %6, %in_4, %in_5 : i8
linalg.yield %7 : i8
} -> tensor<i8>
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = flow.dispatch.region -> (tensor<i8>) {
%6 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%7 = arith.cmpi eq, %in, %in_3 : i64
%8 = arith.select %7, %in_4, %in_5 : i8
linalg.yield %8 : i8
} -> tensor<i8>
flow.return %6 : tensor<i8>
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
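// Editorial note (annotation, not pass output): iree-dispatch-creation-form-scalar-dispatches wraps
// the fused rank-0 generic in a flow.dispatch.region. The trailing count() region returns
// %c1, %c1, %c1, which I read as a 1x1x1 workgroup count, the natural grid for a scalar dispatch.
// Later in this log, ConvertDispatchRegionsToWorkgroupsPass rewrites the region into a
// flow.dispatch.workgroups op whose entry block takes !flow.dispatch.tensor operands and reads them
// with flow.dispatch.tensor.load, as can be seen in the (truncated) final dump below.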
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = flow.dispatch.region -> (tensor<i8>) {
%6 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%7 = arith.cmpi eq, %in, %in_3 : i64
%8 = arith.select %7, %in_4, %in_5 : i8
linalg.yield %8 : i8
} -> tensor<i8>
flow.return %6 : tensor<i8>
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = flow.dispatch.region -> (tensor<i8>) {
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%8 = arith.cmpi eq, %in, %in_3 : i64
%9 = arith.select %8, %in_4, %in_5 : i8
linalg.yield %9 : i8
} -> tensor<i8>
flow.return %7 : tensor<i8>
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = flow.dispatch.region -> (tensor<i8>) {
%6 = tensor.empty() : tensor<i8>
%7 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %3, %2 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%6 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%8 = arith.cmpi eq, %in, %in_3 : i64
%9 = arith.select %8, %in_4, %in_5 : i8
linalg.yield %9 : i8
} -> tensor<i8>
flow.return %7 : tensor<i8>
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
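// Note: the flow.dispatch.region is converted to flow.dispatch.workgroups; the tensor operands become
// !flow.dispatch.tensor bindings accessed through explicit flow.dispatch.tensor.load/store ops.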
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%6 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%8 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%10 = tensor.empty() : tensor<i8>
%11 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%6, %7, %8, %9 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%10 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%12 = arith.cmpi eq, %in, %in_3 : i64
%13 = arith.select %12, %in_4, %in_5 : i8
linalg.yield %13 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %11, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%5, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
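// Note: the now-dead host-side tensor.empty() is gone in this dump; the host function simply feeds the
// barrier results into the dispatch and checks its result (%4).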
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
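// Note: this dump prints the whole module, including the llvm-cpu #hal.executable.target
// (embedded-elf-x86_64) configuration and the public @compare_i64 wrapper that calls @_compare_i64.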
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch.workgroups(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8> =
(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%5 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_3: i64, %in_4: i8, %in_5: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_3 : i64
%12 = arith.select %11, %in_4, %in_5 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
flow.return
} count() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
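// Note: the flow.dispatch.workgroups body is outlined into flow.executable @_compare_i64_dispatch_0
// below; the call site in @_compare_i64 becomes a flow.dispatch of its exported entry point.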
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0 workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
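// Note: the export and its function are renamed to @_compare_i64_dispatch_0_elementwise so the dispatch
// name summarizes its contents; the flow.dispatch call site is updated to match.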
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
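// Note: the util cleanup passes in this fixed-point iteration (ApplyPatterns, FoldGlobals, FuseGlobals,
// and IPO below) leave this small module unchanged.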
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
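// The fixed-point driver re-runs the global optimization pipeline until the
// IR stops changing. The bookkeeping attribute iree.fixedpoint.iteration = 0
// seen in the dumps above has been dropped from the module attributes,
// apparently the signal that the loop has converged.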
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
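// symbol-dce deletes symbols that are no longer referenced; @__device_0,
// @_compare_i64_dispatch_0, and @_compare_i64 all still have uses, so nothing
// is removed.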
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
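// iree-stream-verify-input only validates that the IR is legal input for the
// stream pipeline; as expected for a verification pass, the dump below is
// identical to the previous one.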
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
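// These canonicalizer dumps are scoped to individual functions (the public
// wrapper @compare_i64 and the private @_compare_i64); both bodies are
// already canonical and come through unchanged.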
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
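// iree-util-simplify-global-accesses hoists and deduplicates util.global
// loads/stores within a function; neither function here touches a global
// directly, so both bodies pass through untouched.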
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
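// iree-util-apply-patterns applies module-wide cleanup patterns (folds,
// canonicalizations, and util-specific rewrites); the module round-trips
// unchanged here.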
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
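// iree-util-fold-globals folds globals whose values can be inlined into their
// uses; the device global is only referenced symbolically through the
// #hal.device.affinity attributes and is left as-is, and the module is again
// unchanged.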
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
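// FuseGlobals and IPO run once more as part of this cleanup sequence; as
// before, neither finds anything to change.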
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
flow.executable private @_compare_i64_dispatch_0 {
flow.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
flow.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg1: !flow.dispatch.tensor<readonly:tensor<i64>>, %arg2: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg3: !flow.dispatch.tensor<readonly:tensor<i8>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<i8>>) {
%0 = flow.dispatch.tensor.load %arg0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%1 = flow.dispatch.tensor.load %arg1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%2 = flow.dispatch.tensor.load %arg2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%3 = flow.dispatch.tensor.load %arg3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%4 = tensor.empty() : tensor<i8>
%5 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%0, %1, %2, %3 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%4 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%6 = arith.cmpi eq, %in, %in_0 : i64
%7 = arith.select %6, %in_1, %in_2 : i8
linalg.yield %7 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %5, %arg4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = arith.constant dense<1> : tensor<i8>
%cst_0 = arith.constant dense<0> : tensor<i8>
%cst_1 = arith.constant dense<5> : tensor<i64>
%cst_2 = arith.constant dense<1> : tensor<i64>
%0 = util.optimization_barrier %cst_2 : tensor<i64>
%1 = util.optimization_barrier %cst_1 : tensor<i64>
%2 = util.optimization_barrier %cst_0 : tensor<i8>
%3 = util.optimization_barrier %cst : tensor<i8>
%4 = flow.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%0, %1, %3, %2) : (tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) -> tensor<i8>
check.expect_eq(%4, %cst_0) : tensor<i8>
util.return
}
}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
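// iree-stream-conversion lowers the flow dialect into the stream dialect: the
// flow.executable becomes a stream.executable whose entry point takes
// !stream.binding arguments, flow.dispatch becomes stream.async.dispatch over
// explicitly sized !stream.resource values, and each tensor constant becomes
// a stream.tensor.constant followed by a stream.async.transfer into the
// generic "*" resource lifetime. The two values fed to check.expect_eq are
// transferred to external resources and exported back to tensors via
// stream.tensor.export.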
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i8> in !stream.resource<constant> = dense<1> : tensor<i8>
%0 = stream.resource.size %cst : !stream.resource<constant>
%1 = stream.async.transfer %cst : !stream.resource<constant>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%cst_0 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i8> in !stream.resource<constant> = dense<0> : tensor<i8>
%2 = stream.resource.size %cst_0 : !stream.resource<constant>
%3 = stream.async.transfer %cst_0 : !stream.resource<constant>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%cst_1 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i64> in !stream.resource<constant> = dense<5> : tensor<i64>
%4 = stream.resource.size %cst_1 : !stream.resource<constant>
%5 = stream.async.transfer %cst_1 : !stream.resource<constant>{%4} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%4}
%cst_2 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i64> in !stream.resource<constant> = dense<1> : tensor<i64>
%6 = stream.resource.size %cst_2 : !stream.resource<constant>
%7 = stream.async.transfer %cst_2 : !stream.resource<constant>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%8 = util.optimization_barrier %7 : !stream.resource<*>
%9 = util.optimization_barrier %5 : !stream.resource<*>
%10 = util.optimization_barrier %3 : !stream.resource<*>
%11 = util.optimization_barrier %1 : !stream.resource<*>
%c0 = arith.constant 0 : index
%12 = stream.resource.size %8 : !stream.resource<*>
%13 = stream.resource.size %9 : !stream.resource<*>
%14 = stream.resource.size %11 : !stream.resource<*>
%15 = stream.resource.size %10 : !stream.resource<*>
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%8[%c0 to %12 for %12], %9[%c0 to %13 for %13], %11[%c0 to %14 for %14], %10[%c0 to %15 for %15]) : (!stream.resource<*>{%12}, !stream.resource<*>{%13}, !stream.resource<*>{%14}, !stream.resource<*>{%15}) -> !stream.resource<*>{%16}
%18 = stream.async.transfer %17 : !stream.resource<*>{%16} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%16}
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<i8> in !stream.resource<external>{%16} -> tensor<i8>
%20 = stream.async.transfer %3 : !stream.resource<*>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%2}
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<i8> in !stream.resource<external>{%2} -> tensor<i8>
check.expect_eq(%19, %21) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
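// Another verification-only pass; the dump below is byte-for-byte the same as
// the conversion output above.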
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%cst = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i8> in !stream.resource<constant> = dense<1> : tensor<i8>
%0 = stream.resource.size %cst : !stream.resource<constant>
%1 = stream.async.transfer %cst : !stream.resource<constant>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%cst_0 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i8> in !stream.resource<constant> = dense<0> : tensor<i8>
%2 = stream.resource.size %cst_0 : !stream.resource<constant>
%3 = stream.async.transfer %cst_0 : !stream.resource<constant>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%2}
%cst_1 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i64> in !stream.resource<constant> = dense<5> : tensor<i64>
%4 = stream.resource.size %cst_1 : !stream.resource<constant>
%5 = stream.async.transfer %cst_1 : !stream.resource<constant>{%4} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%4}
%cst_2 = stream.tensor.constant on(#hal.device.affinity<@__device_0>) : tensor<i64> in !stream.resource<constant> = dense<1> : tensor<i64>
%6 = stream.resource.size %cst_2 : !stream.resource<constant>
%7 = stream.async.transfer %cst_2 : !stream.resource<constant>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%8 = util.optimization_barrier %7 : !stream.resource<*>
%9 = util.optimization_barrier %5 : !stream.resource<*>
%10 = util.optimization_barrier %3 : !stream.resource<*>
%11 = util.optimization_barrier %1 : !stream.resource<*>
%c0 = arith.constant 0 : index
%12 = stream.resource.size %8 : !stream.resource<*>
%13 = stream.resource.size %9 : !stream.resource<*>
%14 = stream.resource.size %11 : !stream.resource<*>
%15 = stream.resource.size %10 : !stream.resource<*>
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%8[%c0 to %12 for %12], %9[%c0 to %13 for %13], %11[%c0 to %14 for %14], %10[%c0 to %15 for %15]) : (!stream.resource<*>{%12}, !stream.resource<*>{%13}, !stream.resource<*>{%14}, !stream.resource<*>{%15}) -> !stream.resource<*>{%16}
%18 = stream.async.transfer %17 : !stream.resource<*>{%16} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%16}
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<i8> in !stream.resource<external>{%16} -> tensor<i8>
%20 = stream.async.transfer %3 : !stream.resource<*>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%2}
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<i8> in !stream.resource<external>{%2} -> tensor<i8>
check.expect_eq(%19, %21) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
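// Canonicalization does real work now that the IR is in the stream dialect:
// each splat tensor constant is rewritten as a scalar arith.constant feeding
// a stream.tensor.splat created directly in the "*" lifetime, which also
// removes the per-constant stream.async.transfer ops and replaces the
// stream.resource.size queries on the constants with stream.tensor.sizeof.
// The dispatch, the exports, and the check are structurally unchanged.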
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%3 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%2}
%4 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%4}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%7 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%6}
%8 = util.optimization_barrier %7 : !stream.resource<*>
%9 = util.optimization_barrier %5 : !stream.resource<*>
%10 = util.optimization_barrier %3 : !stream.resource<*>
%11 = util.optimization_barrier %1 : !stream.resource<*>
%12 = stream.resource.size %8 : !stream.resource<*>
%13 = stream.resource.size %9 : !stream.resource<*>
%14 = stream.resource.size %11 : !stream.resource<*>
%15 = stream.resource.size %10 : !stream.resource<*>
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%8[%c0 to %12 for %12], %9[%c0 to %13 for %13], %11[%c0 to %14 for %14], %10[%c0 to %15 for %15]) : (!stream.resource<*>{%12}, !stream.resource<*>{%13}, !stream.resource<*>{%14}, !stream.resource<*>{%15}) -> !stream.resource<*>{%16}
%18 = stream.async.transfer %17 : !stream.resource<*>{%16} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%16}
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<i8> in !stream.resource<external>{%16} -> tensor<i8>
%20 = stream.async.transfer %3 : !stream.resource<*>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%2}
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<i8> in !stream.resource<external>{%2} -> tensor<i8>
check.expect_eq(%19, %21) : tensor<i8>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
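// The dispatch body is already in canonical form; the inline
// affine_map<() -> ()> maps are just the printer expanding the module-level
// #map alias, since this dump is scoped to a single function.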
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
// -----// IR Dump After Inliner (inline) //----- //
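// The inliner leaves the call structure alone: @compare_i64 still reaches
// @_compare_i64 through util.call, and the executable body is untouched.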
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%3 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%2}
%4 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%4}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%7 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%6}
%8 = util.optimization_barrier %7 : !stream.resource<*>
%9 = util.optimization_barrier %5 : !stream.resource<*>
%10 = util.optimization_barrier %3 : !stream.resource<*>
%11 = util.optimization_barrier %1 : !stream.resource<*>
%12 = stream.resource.size %8 : !stream.resource<*>
%13 = stream.resource.size %9 : !stream.resource<*>
%14 = stream.resource.size %11 : !stream.resource<*>
%15 = stream.resource.size %10 : !stream.resource<*>
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%8[%c0 to %12 for %12], %9[%c0 to %13 for %13], %11[%c0 to %14 for %14], %10[%c0 to %15 for %15]) : (!stream.resource<*>{%12}, !stream.resource<*>{%13}, !stream.resource<*>{%14}, !stream.resource<*>{%15}) -> !stream.resource<*>{%16}
%18 = stream.async.transfer %17 : !stream.resource<*>{%16} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%16}
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<i8> in !stream.resource<external>{%16} -> tensor<i8>
%20 = stream.async.transfer %3 : !stream.resource<*>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%2}
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<i8> in !stream.resource<external>{%2} -> tensor<i8>
check.expect_eq(%19, %21) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%3 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%2}
%4 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%4}
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%7 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%6}
%8 = util.optimization_barrier %7 : !stream.resource<*>
%9 = util.optimization_barrier %5 : !stream.resource<*>
%10 = util.optimization_barrier %3 : !stream.resource<*>
%11 = util.optimization_barrier %1 : !stream.resource<*>
%12 = stream.resource.size %8 : !stream.resource<*>
%13 = stream.resource.size %9 : !stream.resource<*>
%14 = stream.resource.size %11 : !stream.resource<*>
%15 = stream.resource.size %10 : !stream.resource<*>
%16 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%17 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%8[%c0 to %12 for %12], %9[%c0 to %13 for %13], %11[%c0 to %14 for %14], %10[%c0 to %15 for %15]) : (!stream.resource<*>{%12}, !stream.resource<*>{%13}, !stream.resource<*>{%14}, !stream.resource<*>{%15}) -> !stream.resource<*>{%16}
%18 = stream.async.transfer %17 : !stream.resource<*>{%16} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%16}
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %18 : tensor<i8> in !stream.resource<external>{%16} -> tensor<i8>
%20 = stream.async.transfer %3 : !stream.resource<*>{%2} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%2}
%21 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %20 : tensor<i8> in !stream.resource<external>{%2} -> tensor<i8>
check.expect_eq(%19, %21) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
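// CSE finally has something to do: the duplicate stream.tensor.sizeof queries
// for tensor<i8> and tensor<i64> are each collapsed to one, and the dispatch
// result size reuses %0, shrinking the value numbering from %21 to %18.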
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
}
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i8> : index
%1 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%2 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> tensor<i8> in !stream.resource<*>{%0}
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<i64> : index
%4 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%5 = stream.tensor.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> tensor<i64> in !stream.resource<*>{%3}
%6 = util.optimization_barrier %5 : !stream.resource<*>
%7 = util.optimization_barrier %4 : !stream.resource<*>
%8 = util.optimization_barrier %2 : !stream.resource<*>
%9 = util.optimization_barrier %1 : !stream.resource<*>
%10 = stream.resource.size %6 : !stream.resource<*>
%11 = stream.resource.size %7 : !stream.resource<*>
%12 = stream.resource.size %9 : !stream.resource<*>
%13 = stream.resource.size %8 : !stream.resource<*>
%14 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%6[%c0 to %10 for %10], %7[%c0 to %11 for %11], %9[%c0 to %12 for %12], %8[%c0 to %13 for %13]) : (!stream.resource<*>{%10}, !stream.resource<*>{%11}, !stream.resource<*>{%12}, !stream.resource<*>{%13}) -> !stream.resource<*>{%0}
%15 = stream.async.transfer %14 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
%17 = stream.async.transfer %2 : !stream.resource<*>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%0}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%0} -> tensor<i8>
check.expect_eq(%16, %18) : tensor<i8>
util.return
}
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
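// Note on the device executable above (not pass output): it loads four rank-0 tensors through its
// stream.binding subspans, runs a single elementwise compare/select, and stores the i8 result.
// Stripped of the binding and flow.dispatch.tensor plumbing, the kernel body is equivalent to the
// standalone linalg function below; a minimal sketch (function and argument names are illustrative,
// not taken from the dump).
func.func @elementwise_compare_select_sketch(%a: tensor<i64>, %b: tensor<i64>, %t: tensor<i8>, %f: tensor<i8>) -> tensor<i8> {
  %init = tensor.empty() : tensor<i8>
  // Rank-0 generic: every indexing map is () -> () and there are no loop dimensions.
  %out = linalg.generic {indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []} ins(%a, %b, %t, %f : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%init : tensor<i8>) {
  ^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %o: i8):
    %cmp = arith.cmpi eq, %in, %in_0 : i64
    %sel = arith.select %cmp, %in_1, %in_2 : i8
    linalg.yield %sel : i8
  } -> tensor<i8>
  return %out : tensor<i8>
}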
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%1 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%4 = util.optimization_barrier %3 : !stream.resource<*>
%5 = util.optimization_barrier %2 : !stream.resource<*>
%6 = util.optimization_barrier %1 : !stream.resource<*>
%7 = util.optimization_barrier %0 : !stream.resource<*>
%8 = stream.resource.size %4 : !stream.resource<*>
%9 = stream.resource.size %5 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %6 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%4[%c0 to %8 for %8], %5[%c0 to %9 for %9], %7[%c0 to %10 for %10], %6[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %1 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
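// Note on the dump above (not pass output): relative to the earlier stream.tensor.* form,
// iree-stream-encode-host-tensors has replaced the tensor<i64>/tensor<i8> sizeof queries with the
// concrete byte counts %c8 and %c1 and lowered the tensor splats to stream.async.splat on those
// sizes. A minimal standalone sketch of the same size arithmetic (the function name is illustrative):
func.func @rank0_tensor_byte_sizes_sketch() -> (index, index) {
  // A rank-0 tensor<i64> occupies 64 bits / 8 = 8 bytes.
  %c8 = arith.constant 8 : index
  // A rank-0 tensor<i8> occupies a single byte.
  %c1 = arith.constant 1 : index
  return %c8, %c1 : index, index
}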
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
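// Note: iree-stream-materialize-copy-on-write leaves @_compare_i64 unchanged here; no resource is written in place (the dispatch only reads its operands and produces a fresh result), so no clone ops are introduced. This is read from the identical before/after dumps, not a claim about the pass internals.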
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
}
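// Note: iree-stream-elide-async-copies also makes no change to this function; the two same-device stream.async.transfer ops feeding the tensor exports are still present here and only disappear later, in the iree-stream-refine-usage dump below.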
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<*>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<*>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<*>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<*>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<*>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<*>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<*>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<*>
%8 = stream.resource.size %1 : !stream.resource<*>
%9 = stream.resource.size %3 : !stream.resource<*>
%10 = stream.resource.size %7 : !stream.resource<*>
%11 = stream.resource.size %5 : !stream.resource<*>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<*>{%8}, !stream.resource<*>{%9}, !stream.resource<*>{%10}, !stream.resource<*>{%11}) -> !stream.resource<*>{%c1}
%13 = stream.async.transfer %12 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %13 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%15 = stream.async.transfer %4 : !stream.resource<*>{%c1} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c1}
%16 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %15 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%14, %16) : tensor<i8>
util.return
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
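// Note: iree-stream-refine-usage is the first pass in this stretch that changes the IR: the !stream.resource<*> values are refined to concrete <transient> or <external> lifetimes, and because the dispatch result and the %c0_i8 splat are now produced directly as <external> resources, the intermediate stream.async.transfer ops are dropped and stream.tensor.export consumes %12 and %4 directly.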
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%0 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i64 : i64 -> !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%2 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c5_i64 : i64 -> !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%4 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c0_i8 : i8 -> !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%6 = stream.async.splat on(#hal.device.affinity<@__device_0>) %c1_i8 : i8 -> !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%1[%c0 to %8 for %8], %3[%c0 to %9 for %9], %7[%c0 to %10 for %10], %5[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
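// Note: relative to the previous dump, each stream.async.splat and the final stream.async.dispatch
// is now wrapped in its own stream.async.execute region that yields a !stream.timepoint, and each
// result is awaited (stream.timepoint.await) before the util.optimization_barrier that consumes it.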
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.splat %c1_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.splat %c5_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
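// Note: this dump is identical to the ScheduleExecutionPass output above; each execute region holds
// a single operation, so there is nothing here for the pass to group into concurrent waves.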
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.splat %c1_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.splat %c5_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
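// Note: stream.timepoint.immediate values (%12-%15) now stand in for the timepoints of the
// barrier-guarded resources and are joined into %16, which the final execute region awaits;
// presumably the util.optimization_barrier ops hide the original producing timepoints from the pass.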
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%20 = stream.async.splat %c1_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %20 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%20 = stream.async.splat %c5_i64 : i64 -> !stream.resource<transient>{%c8}
stream.yield %20 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%20 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %20 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%20 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %20 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.timepoint.immediate => !stream.timepoint
%13 = stream.timepoint.immediate => !stream.timepoint
%14 = stream.timepoint.immediate => !stream.timepoint
%15 = stream.timepoint.immediate => !stream.timepoint
%16 = stream.timepoint.join max(%12, %13, %14, %15) => !stream.timepoint
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%16) => with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%20 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %20 : !stream.resource<external>{%c1}
} => !stream.timepoint
%17 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
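// Note: the two i64 splats are now dispatches of the newly inserted @__builtin_splat_i64 executable
// (a linalg.fill over tensor<?xi64>), while the i8 splats remain stream.async.splat ops; the builtin
// path appears to be used because wide (64-bit) splat patterns are not handled directly.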
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%c8_0 = arith.constant 8 : index
%c1_1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%20 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1_1](%c1_i64, %c1_1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %20 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%c8_2 = arith.constant 8 : index
%c1_3 = arith.constant 1 : index
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%20 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1_3](%c5_i64, %c1_3) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %20 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%20 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %20 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_8, %result_timepoint_9 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%20 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %20 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_9 => %results_8 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%12 = stream.timepoint.immediate => !stream.timepoint
%13 = stream.timepoint.immediate => !stream.timepoint
%14 = stream.timepoint.immediate => !stream.timepoint
%15 = stream.timepoint.immediate => !stream.timepoint
%16 = stream.timepoint.join max(%12, %13, %14, %15) => !stream.timepoint
%results_10, %result_timepoint_11 = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%16) => with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%20 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %20 : !stream.resource<external>{%c1}
} => !stream.timepoint
%17 = stream.timepoint.await %result_timepoint_11 => %results_10 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
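// Note: compared with the MaterializeBuiltinsPass output, the duplicated %c8/%c1 constants and the
// immediate-timepoint join have been folded away, so the final execute region no longer carries an
// await clause.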
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
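// Note: CSE and the following SimplifyGlobalAccesses dump make no further changes; both match the
// canonicalized form above.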
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
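// Note: this and the following FoldGlobals and FuseGlobals dumps are identical whole-module
// snapshots; none of these cleanup passes finds anything to change in this small test.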
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
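// Note: iree-stream-verify-lowering-to-async only checks that the IR is in the
// expected async form; the module dump below is identical to the IPO output above.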
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%0 = stream.timepoint.await %result_timepoint => %results : !stream.resource<transient>{%c8}
%1 = util.optimization_barrier %0 : !stream.resource<transient>
%results_0, %result_timepoint_1 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c8} {
%15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
stream.yield %15 : !stream.resource<transient>{%c8}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint_1 => %results_0 : !stream.resource<transient>{%c8}
%3 = util.optimization_barrier %2 : !stream.resource<transient>
%results_2, %result_timepoint_3 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<external>{%c1} {
%15 = stream.async.splat %c0_i8 : i8 -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%4 = stream.timepoint.await %result_timepoint_3 => %results_2 : !stream.resource<external>{%c1}
%5 = util.optimization_barrier %4 : !stream.resource<external>
%results_4, %result_timepoint_5 = stream.async.execute on(#hal.device.affinity<@__device_0>) with() -> !stream.resource<transient>{%c1} {
%15 = stream.async.splat %c1_i8 : i8 -> !stream.resource<transient>{%c1}
stream.yield %15 : !stream.resource<transient>{%c1}
} => !stream.timepoint
%6 = stream.timepoint.await %result_timepoint_5 => %results_4 : !stream.resource<transient>{%c1}
%7 = util.optimization_barrier %6 : !stream.resource<transient>
%8 = stream.resource.size %1 : !stream.resource<transient>
%9 = stream.resource.size %3 : !stream.resource<transient>
%10 = stream.resource.size %7 : !stream.resource<transient>
%11 = stream.resource.size %5 : !stream.resource<external>
%results_6, %result_timepoint_7 = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg0: !stream.resource<transient>{%8}, %3 as %arg1: !stream.resource<transient>{%9}, %7 as %arg2: !stream.resource<transient>{%10}, %5 as %arg3: !stream.resource<external>{%11}) -> !stream.resource<external>{%c1} {
%15 = stream.async.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%arg0[%c0 to %8 for %8], %arg1[%c0 to %9 for %9], %arg2[%c0 to %10 for %10], %arg3[%c0 to %11 for %11]) : (!stream.resource<transient>{%8}, !stream.resource<transient>{%9}, !stream.resource<transient>{%10}, !stream.resource<external>{%11}) -> !stream.resource<external>{%c1}
stream.yield %15 : !stream.resource<external>{%c1}
} => !stream.timepoint
%12 = stream.timepoint.await %result_timepoint_7 => %results_6 : !stream.resource<external>{%c1}
%13 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %12 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%14 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%13, %14) : tensor<i8>
util.return
}
}
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
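// Note: comparing the dump below with the async form above shows what
// iree-stream-schedule-allocation does here: each stream.async.execute region gets
// an explicit stream.resource.alloca for its result, the region is rewritten to a
// stream.cmd.execute that awaits the allocation timepoint, stream.async.dispatch
// becomes stream.cmd.dispatch with explicit ro/wo resource bindings, and the i8
// stream.async.splat ops become stream.cmd.fill. A condensed before/after of the
// first splat dispatch, taken from the surrounding dumps (SSA names shortened and
// the on(#hal.device.affinity<@__device_0>) attribute elided as "..."):
//
//   // before (async form):
//   %results, %tp = stream.async.execute ... with() -> !stream.resource<transient>{%c8} {
//     %15 = stream.async.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1) : (i64, index) -> !stream.resource<transient>{%c8}
//     stream.yield %15 : !stream.resource<transient>{%c8}
//   } => !stream.timepoint
//
//   // after (explicit allocation + command form):
//   %result, %alloca_tp = stream.resource.alloca uninitialized ... : !stream.resource<transient>{%c8} => !stream.timepoint
//   %done = stream.cmd.execute ... await(%alloca_tp) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
//     stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
//       wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
//     }
//   } => !stream.timepoint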
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_6 = arith.constant 0 : index
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0_6 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_7, %result_timepoint_8 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%c0_9 = arith.constant 0 : index
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_8) => with(%result_7 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0_9 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_7 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%c0_10 = arith.constant 0 : index
%result_11, %result_timepoint_12 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_12) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_11 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_11 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
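// Note: iree-stream-pack-constants and iree-stream-layout-slices appear to have
// nothing to do for this module (there are no constant resources to pack or slices
// to lay out), so the next four per-function dumps match the allocation-scheduled
// IR above.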
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_6 = arith.constant 0 : index
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0_6 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_7, %result_timepoint_8 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%c0_9 = arith.constant 0 : index
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_8) => with(%result_7 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0_9 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_7 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%c0_10 = arith.constant 0 : index
%result_11, %result_timepoint_12 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_12) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_11 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_11 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_6 = arith.constant 0 : index
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0_6 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_7, %result_timepoint_8 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%c0_9 = arith.constant 0 : index
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_8) => with(%result_7 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0_9 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_7 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%c0_10 = arith.constant 0 : index
%result_11, %result_timepoint_12 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_12) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_11 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_11 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- //
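// Note: iree-util-propagate-subranges also leaves this module unchanged; every
// resource here is already accessed over its full range ([%c0 for <size>]), so the
// function bodies below match the layout-slices output above.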
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_6 = arith.constant 0 : index
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0_6 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_7, %result_timepoint_8 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%c0_9 = arith.constant 0 : index
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_8) => with(%result_7 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0_9 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_7 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%c0_10 = arith.constant 0 : index
%result_11, %result_timepoint_12 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_12) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_11 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_11 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
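// Note: canonicalization folds the duplicated zero-index constants (%c0_0, %c0_3,
// %c0_6, %c0_9, %c0_10) into the single %c0 and renumbers the suffixed result
// names; the stream.resource.alloca / stream.cmd.execute structure is unchanged.
// For example:
//
//   %c0   = arith.constant 0 : index
//   %c0_0 = arith.constant 0 : index   // duplicate of %c0; uses rewritten to %c0 and the op erased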
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
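The dispatch region in @_compare_i64_dispatch_0_elementwise above boils down to a rank-0 compare-and-select over scalar operands. Purely as an illustration (Python rather than MLIR; the function name is invented for this note, not taken from the dump), the same scalar logic is:

def compare_select(lhs: int, rhs: int, on_true: int, on_false: int) -> int:
    # Mirrors the linalg.generic body: arith.cmpi eq followed by arith.select.
    return on_true if lhs == rhs else on_false

# With the constants used in this test (1 vs. 5, selecting between i8 values 1 and 0):
assert compare_select(1, 5, 1, 0) == 0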
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
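For reference, the @__builtin_splat_i64 executable simply fills a dynamically sized i64 buffer with a single value. A minimal sketch of the same effect, assuming NumPy purely for illustration (it is not part of this dump):

import numpy as np

def builtin_splat_i64(value: int, length: int) -> np.ndarray:
    # Same effect as linalg.fill writing into tensor<?xi64>{length}.
    return np.full(length, value, dtype=np.int64)

assert builtin_splat_i64(5, 1).tolist() == [5]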
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
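The alloca sizes above follow directly from the element types: the i64 splats get 8-byte transients (%c8), while the i8 fills and the dispatch result get 1-byte resources (%c1). A tiny sketch of that arithmetic (illustrative only; the helper name is hypothetical):

def resource_byte_size(num_elements: int, bit_width: int) -> int:
    # Rank-0 tensors hold one element; bytes = elements * (bits / 8).
    return num_elements * (bit_width // 8)

assert resource_byte_size(1, 64) == 8  # tensor<i64> transients for the splats
assert resource_byte_size(1, 8) == 1   # tensor<i8> fills and the dispatch output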
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
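Each stream.resource.alloca above yields a timepoint, the stream.cmd.execute region awaits it and produces a new timepoint, and stream.timepoint.await blocks until that work has retired before the resource is read. As a loose analogy only, using plain Python futures rather than any IREE API:

from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=1)
alloca_tp = pool.submit(bytearray, 8)                 # alloca of an 8-byte resource => "timepoint"
execute_tp = pool.submit(lambda: alloca_tp.result())  # execute await(alloca_tp) => new "timepoint"
resource = execute_tp.result()                        # stream.timepoint.await
assert len(resource) == 8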
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
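Taken together, @_compare_i64 splats 1 and 5 into i64 resources, fills one i8 resource with 0 and another with 1, runs the compare/select dispatch, and checks the exported result against the zero-filled buffer. A compact re-enactment of that data flow, assuming NumPy and with variable names invented for this note:

import numpy as np

lhs = np.full(1, 1, dtype=np.int64)      # __builtin_splat_i64(1)
rhs = np.full(1, 5, dtype=np.int64)      # __builtin_splat_i64(5)
zeros = np.full(1, 0, dtype=np.int8)     # external resource filled with 0
ones = np.full(1, 1, dtype=np.int8)      # transient resource filled with 1

# _compare_i64_dispatch_0_elementwise: select `ones` where lhs == rhs, else `zeros`.
result = np.where(lhs == rhs, ones, zeros)

# check.expect_eq(%18, %19): the dispatch result must equal the zero fill.
assert np.array_equal(result, zeros)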
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
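The util.optimization_barrier ops sprinkled through @_compare_i64 keep the splatted and filled resources from being constant-folded, so the comparison actually executes at runtime rather than being resolved by the compiler. Loosely, and only as a Python analogy rather than a description of the op's implementation, it acts like an identity the optimizer is not allowed to look through:

def optimization_barrier(value):
    # Identity at runtime; its purpose is to block constant folding across it
    # (analogy for util.optimization_barrier, not its implementation).
    return value

lhs = optimization_barrier(1)
rhs = optimization_barrier(5)
assert lhs != rhs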
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
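The @__builtin_splat_i64 executable that appears in each of these dumps just fills a dynamically sized i64 tensor with one value (tensor.empty followed by linalg.fill). A minimal Python sketch of that behavior, using an illustrative name rather than an IREE API:

def splat_i64(value: int, length: int) -> list[int]:
    # Equivalent of tensor.empty(length) followed by linalg.fill with `value`.
    return [value] * length

# This test dispatches the builtin twice: splat_i64(1, 1) and splat_i64(5, 1).
assert splat_i64(5, 1) == [5]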
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
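Across these dumps the body of @_compare_i64_dispatch_0_elementwise is unchanged: a rank-0 linalg.generic whose scalar body is arith.cmpi eq on the two i64 inputs followed by arith.select between the two i8 inputs. A minimal Python sketch of that scalar body, with illustrative names and the values this test feeds it:

def compare_select(lhs: int, rhs: int, if_true: int, if_false: int) -> int:
    # arith.cmpi eq + arith.select from the linalg.generic region.
    return if_true if lhs == rhs else if_false

# splat(1) vs splat(5): not equal, so the kernel writes the "false" byte (0),
# which check.expect_eq then compares against the zero-filled external buffer.
assert compare_select(1, 5, if_true=1, if_false=0) == 0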
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: i64, %arg1: index, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
%1 = tensor.empty(%arg1) : tensor<?xi64>
%2 = linalg.fill ins(%arg0 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg1], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i64, %c1 : i64, index) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding, %arg1: index, %arg2: i64, %arg3: index) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg1] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg3}
%1 = tensor.empty(%arg3) : tensor<?xi64>
%2 = linalg.fill ins(%arg2 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg3], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg3}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: !stream.binding, %arg5: index, %arg6: index, %arg7: index, %arg8: index, %arg9: index) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg5] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%arg6] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%arg7] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%arg8] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%arg9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0, %c1_i64, %c1 : index, i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0, %c5_i64, %c1 : index, i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%result_6 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_6 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_8, %result_timepoint_9 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_10 = arith.constant 0 : index
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_9) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_8 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0, %c0, %c0, %c0, %c0 : index, index, index, index, index) {
ro %arg0[%c0_10 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0_10 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0_10 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0_10 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_8 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
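Relative to the previous dump, FuseDispatchBindingsPass turns the hard-coded %c0 subspan offsets inside the executables into extra index operands (%arg1 on the splat builtin, %arg5..%arg9 on the elementwise dispatch), and the call sites now pass %c0 explicitly for each of them. A minimal Python sketch of the addressing this expresses, assuming a flat byte buffer per binding (names are illustrative):

def subspan(binding: bytes, offset: int, length: int) -> bytes:
    # A binding plus a caller-supplied byte offset selects the region a
    # dispatch reads or writes; in this test every offset is 0.
    return binding[offset:offset + length]

assert subspan(b"\x00" * 8, 0, 8) == b"\x00" * 8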
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: index {stream.values = [0 : index]}, %arg2: i64 {stream.values = [1, 5]}, %arg3: index {stream.values = [1 : index]}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg1] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg3}
%1 = tensor.empty(%arg3) : tensor<?xi64>
%2 = linalg.fill ins(%arg2 : i64) outs(%1 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %2, %0, offsets = [0], sizes = [%arg3], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%arg3}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: index {stream.values = [0 : index]}, %arg6: index {stream.values = [0 : index]}, %arg7: index {stream.values = [0 : index]}, %arg8: index {stream.values = [0 : index]}, %arg9: index {stream.values = [0 : index]}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg5] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%arg6] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%arg7] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%arg8] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%arg9] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0, %c1_i64, %c1 : index, i64, index) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_1, %result_timepoint_2 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_3 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_2) => with(%result_1 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0, %c5_i64, %c1 : index, i64, index) {
wo %arg0[%c0_3 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_1 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_4 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%result_6 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_6 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_8, %result_timepoint_9 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_10 = arith.constant 0 : index
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_9) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_8 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0, %c0, %c0, %c0, %c0 : index, index, index, index, index) {
ro %arg0[%c0_10 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0_10 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0_10 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0_10 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_10 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_8 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
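In the PackDispatchOperandsPass dump that follows, every i64 and index dispatch operand is lowered to a pair of i32 words at the call site, and each dispatch function reassembles them with arith.extui, arith.shli (by 32), and arith.ori (plus arith.index_castui for index values). A minimal Python sketch of that split/reassemble arithmetic, with illustrative names that are not IREE APIs:

def pack_i64(value: int) -> tuple[int, int]:
    # Split an unsigned 64-bit value into (low, high) 32-bit words, as the
    # call sites do when passing i64/index operands as i32 pairs.
    value &= (1 << 64) - 1
    return value & 0xFFFFFFFF, (value >> 32) & 0xFFFFFFFF

def unpack_i64(low: int, high: int) -> int:
    # Mirror of the arith.extui / arith.shli / arith.ori sequence in the
    # dispatch function bodies.
    return (low & 0xFFFFFFFF) | ((high & 0xFFFFFFFF) << 32)

assert unpack_i64(*pack_i64(5)) == 5   # the %c5_i64 splat value
assert unpack_i64(*pack_i64(1)) == 1   # the %c1_i64 value and the %c1 workload size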
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) {
%0 = arith.extui %arg1 : i32 to i64
%1 = arith.extui %arg2 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg3 : i32 to i64
%6 = arith.extui %arg4 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %arg5 : i32 to i64
%10 = arith.extui %arg6 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%11 = arith.shli %10, %c32_i64_1 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%c0 = arith.constant 0 : index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32, %arg12: i32, %arg13: i32, %arg14: i32) {
%0 = arith.extui %arg5 : i32 to i64
%1 = arith.extui %arg6 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg7 : i32 to i64
%6 = arith.extui %arg8 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg9 : i32 to i64
%11 = arith.extui %arg10 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg11 : i32 to i64
%16 = arith.extui %arg12 : i32 to i64
%c32_i64_2 = arith.constant 32 : i64
%17 = arith.shli %16, %c32_i64_2 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %arg13 : i32 to i64
%21 = arith.extui %arg14 : i32 to i64
%c32_i64_3 = arith.constant 32 : i64
%22 = arith.shli %21, %c32_i64_3 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%c0 = arith.constant 0 : index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_4: i64, %in_5: i8, %in_6: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_4 : i64
%37 = arith.select %36, %in_5, %in_6 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c8 = arith.constant 8 : index
%c1_i64 = arith.constant 1 : i64
%c5_i64 = arith.constant 5 : i64
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i32_2 = arith.constant 0 : i32
%c1_i32 = arith.constant 1 : i32
%c32_i64_3 = arith.constant 32 : i64
%c0_i64_4 = arith.constant 0 : i64
%c0_i32_5 = arith.constant 0 : i32
%c1_i64_6 = arith.constant 1 : i64
%c1_i32_7 = arith.constant 1 : i32
%c32_i64_8 = arith.constant 32 : i64
%c0_i64_9 = arith.constant 0 : i64
%c0_i32_10 = arith.constant 0 : i32
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32_2, %c1_i32, %c0_i32_5, %c1_i32_7, %c0_i32_10 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0_0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_11, %result_timepoint_12 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%c0_13 = arith.constant 0 : index
%c0_i64_14 = arith.constant 0 : i64
%c0_i32_15 = arith.constant 0 : i32
%c32_i64_16 = arith.constant 32 : i64
%c0_i64_17 = arith.constant 0 : i64
%c0_i32_18 = arith.constant 0 : i32
%c5_i32 = arith.constant 5 : i32
%c32_i64_19 = arith.constant 32 : i64
%c0_i64_20 = arith.constant 0 : i64
%c0_i32_21 = arith.constant 0 : i32
%c1_i64_22 = arith.constant 1 : i64
%c1_i32_23 = arith.constant 1 : i32
%c32_i64_24 = arith.constant 32 : i64
%c0_i64_25 = arith.constant 0 : i64
%c0_i32_26 = arith.constant 0 : i32
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_12) => with(%result_11 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32_15, %c0_i32_18, %c5_i32, %c0_i32_21, %c1_i32_23, %c0_i32_26 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0_13 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_11 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_27, %result_timepoint_28 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_28) => with(%result_27 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_27 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_29, %result_timepoint_30 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_30) => with(%result_29 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_29 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_31, %result_timepoint_32 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%c0_33 = arith.constant 0 : index
%c0_i64_34 = arith.constant 0 : i64
%c0_i32_35 = arith.constant 0 : i32
%c32_i64_36 = arith.constant 32 : i64
%c0_i64_37 = arith.constant 0 : i64
%c0_i32_38 = arith.constant 0 : i32
%c0_i64_39 = arith.constant 0 : i64
%c0_i32_40 = arith.constant 0 : i32
%c32_i64_41 = arith.constant 32 : i64
%c0_i64_42 = arith.constant 0 : i64
%c0_i32_43 = arith.constant 0 : i32
%c0_i64_44 = arith.constant 0 : i64
%c0_i32_45 = arith.constant 0 : i32
%c32_i64_46 = arith.constant 32 : i64
%c0_i64_47 = arith.constant 0 : i64
%c0_i32_48 = arith.constant 0 : i32
%c0_i64_49 = arith.constant 0 : i64
%c0_i32_50 = arith.constant 0 : i32
%c32_i64_51 = arith.constant 32 : i64
%c0_i64_52 = arith.constant 0 : i64
%c0_i32_53 = arith.constant 0 : i32
%c0_i64_54 = arith.constant 0 : i64
%c0_i32_55 = arith.constant 0 : i32
%c32_i64_56 = arith.constant 32 : i64
%c0_i64_57 = arith.constant 0 : i64
%c0_i32_58 = arith.constant 0 : i32
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_32) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_31 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32_35, %c0_i32_38, %c0_i32_40, %c0_i32_43, %c0_i32_45, %c0_i32_48, %c0_i32_50, %c0_i32_53, %c0_i32_55, %c0_i32_58 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0_33 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0_33 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0_33 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0_33 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0_33 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_31 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
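// Note: in the dispatches above, every 64-bit operand (binding offsets, splat
// values, element counts) reaches the dispatch as a pair of i32 push constants
// and is recombined with extui/shli/ori (and, for offsets and sizes, cast back
// to index). A minimal illustrative sketch of that recombination pattern follows;
// it is not compiler output and the @pack_i64 name is hypothetical:
func.func @pack_i64(%lo: i32, %hi: i32) -> i64 {
  %c32_i64 = arith.constant 32 : i64
  %lo64 = arith.extui %lo : i32 to i64            // zero-extend the low half
  %hi64 = arith.extui %hi : i32 to i64            // zero-extend the high half
  %hi_shifted = arith.shli %hi64, %c32_i64 : i64  // move the high half into bits 32..63
  %packed = arith.ori %lo64, %hi_shifted : i64    // packed = lo | (hi << 32)
  return %packed : i64
}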
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
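// Note: the public @compare_i64 above is the synchronous ABI stub (iree.abi.stub)
// that simply forwards to the private @_compare_i64; the three dumps are identical
// because Canonicalizer, CSE and SimplifyGlobalAccesses find nothing to change in
// a single util.call.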
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
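// Note: compared with the pre-canonicalization form of @_compare_i64 earlier in
// the log, Canonicalizer and CSE have collapsed the per-use constants
// (%c0_i32_35, %c0_i32_38, ..., plus the unused %c0_i64_* and redundant %c32_i64_*
// values) into single %c0_i32, %c1_i32, %c5_i32, %c0_i8, %c1_i8, %c8, %c0 and %c1
// definitions, while the stream.resource.alloca / stream.cmd.execute /
// check.expect_eq structure is unchanged. SimplifyGlobalAccesses then leaves the
// function as-is: there are no direct global loads or stores here to reorder.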
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg1 : i32 to i64
%1 = arith.extui %arg2 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg3 : i32 to i64
%6 = arith.extui %arg4 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %arg5 : i32 to i64
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.shli %10, %c32_i64 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32, %arg12: i32, %arg13: i32, %arg14: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg5 : i32 to i64
%1 = arith.extui %arg6 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg7 : i32 to i64
%6 = arith.extui %arg8 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg9 : i32 to i64
%11 = arith.extui %arg10 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg11 : i32 to i64
%16 = arith.extui %arg12 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %arg13 : i32 to i64
%21 = arith.extui %arg14 : i32 to i64
%22 = arith.shli %21, %c32_i64 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_0 : i64
%37 = arith.select %36, %in_1, %in_2 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
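// Note: at this stage @_compare_i64 is fully lowered to the stream dialect: two
// @__builtin_splat_i64 dispatches materialize the i64 operands (1 and 5), two
// stream.cmd.fill ops materialize the i8 select operands (0 and 1), and each
// result passes through util.optimization_barrier so the comparison cannot be
// constant-folded on the host. The final dispatch computes, per element,
//   select(1 == 5, 1, 0)  ==>  0
// into a one-byte external resource, which check.expect_eq compares against the
// zero-filled buffer %7. The FoldGlobals and FuseGlobals dumps that follow are
// identical to this one, i.e. those passes make no further changes to the module.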
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg1 : i32 to i64
%1 = arith.extui %arg2 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg3 : i32 to i64
%6 = arith.extui %arg4 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %arg5 : i32 to i64
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.shli %10, %c32_i64 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32, %arg12: i32, %arg13: i32, %arg14: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg5 : i32 to i64
%1 = arith.extui %arg6 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg7 : i32 to i64
%6 = arith.extui %arg8 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg9 : i32 to i64
%11 = arith.extui %arg10 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg11 : i32 to i64
%16 = arith.extui %arg12 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %arg13 : i32 to i64
%21 = arith.extui %arg14 : i32 to i64
%22 = arith.shli %21, %c32_i64 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_0 : i64
%37 = arith.select %36, %in_1, %in_2 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg1 : i32 to i64
%1 = arith.extui %arg2 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg3 : i32 to i64
%6 = arith.extui %arg4 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %arg5 : i32 to i64
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.shli %10, %c32_i64 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32, %arg12: i32, %arg13: i32, %arg14: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg5 : i32 to i64
%1 = arith.extui %arg6 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg7 : i32 to i64
%6 = arith.extui %arg8 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg9 : i32 to i64
%11 = arith.extui %arg10 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg11 : i32 to i64
%16 = arith.extui %arg12 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %arg13 : i32 to i64
%21 = arith.extui %arg14 : i32 to i64
%22 = arith.shli %21, %c32_i64 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_0 : i64
%37 = arith.select %36, %in_1, %in_2 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg1 : i32 to i64
%1 = arith.extui %arg2 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg3 : i32 to i64
%6 = arith.extui %arg4 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %arg5 : i32 to i64
%10 = arith.extui %arg6 : i32 to i64
%11 = arith.shli %10, %c32_i64 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32, %arg12: i32, %arg13: i32, %arg14: i32) {
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg5 : i32 to i64
%1 = arith.extui %arg6 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg7 : i32 to i64
%6 = arith.extui %arg8 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg9 : i32 to i64
%11 = arith.extui %arg10 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg11 : i32 to i64
%16 = arith.extui %arg12 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %arg13 : i32 to i64
%21 = arith.extui %arg14 : i32 to i64
%22 = arith.shli %21, %c32_i64 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_0 : i64
%37 = arith.select %36, %in_1, %in_2 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c1_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c0_i32, %c0_i32, %c5_i32, %c0_i32, %c1_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- //
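// Note: relative to the dump above, FoldUniformOperandsPass has inlined dispatch operands
// that are constant across all call sites: @__builtin_splat_i64 now receives only the
// splatted value (1 or 5) as a single i32, and @_compare_i64_dispatch_0_elementwise
// receives no i32 operands at all (every offset operand was uniformly 0).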
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c0_i32 = arith.constant 0 : i32
%c1_i32 = arith.constant 1 : i32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg1 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 {stream.values = [1, 5]} : i64
%9 = arith.extui %c1_i32 : i32 to i64
%10 = arith.extui %c0_i32 : i32 to i64
%11 = arith.shli %10, %c32_i64 : i64
%12 = arith.ori %9, %11 : i64
%13 = arith.index_castui %12 {stream.values = [1 : index]} : i64 to index
%14 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
%15 = tensor.empty(%13) : tensor<?xi64>
%16 = linalg.fill ins(%8 : i64) outs(%15 : tensor<?xi64>) -> tensor<?xi64>
flow.dispatch.tensor.store %16, %14, offsets = [0], sizes = [%13], strides = [1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%13}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %c0_i32 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %c0_i32 : i32 to i64
%11 = arith.extui %c0_i32 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %c0_i32 : i32 to i64
%16 = arith.extui %c0_i32 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20 = arith.extui %c0_i32 : i32 to i64
%21 = arith.extui %c0_i32 : i32 to i64
%22 = arith.shli %21, %c32_i64 : i64
%23 = arith.ori %20, %22 : i64
%24 = arith.index_castui %23 {stream.values = [0 : index]} : i64 to index
%25 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%26 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%27 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%28 = stream.binding.subspan %arg3[%19] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%29 = stream.binding.subspan %arg4[%24] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%30 = flow.dispatch.tensor.load %25, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%31 = flow.dispatch.tensor.load %26, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%32 = flow.dispatch.tensor.load %27, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%33 = flow.dispatch.tensor.load %28, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%34 = tensor.empty() : tensor<i8>
%35 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%30, %31, %32, %33 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%34 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%36 = arith.cmpi eq, %in, %in_0 : i64
%37 = arith.select %36, %in_1, %in_2 : i8
linalg.yield %37 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %35, %29, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After CSE (cse) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
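// Note: in the dump below the offset-decoding arithmetic inside both dispatch functions
// has folded to constant zero offsets, and the splat body now fills a static tensor<1xi64>
// instead of a dynamically sized tensor<?xi64>.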
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
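// Note: this dump and the FuseGlobals and IPO dumps that follow appear unchanged from the
// ApplyPatterns output; the module still has a single global (@__device_0) and the same
// two executables and two functions.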
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
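// Annotation: symbol-dce deletes symbols that are no longer reachable from exported roots.
// Nothing qualifies here: @_compare_i64 is called by the public @compare_i64, and both
// stream.executables are referenced by stream.cmd.dispatch ops, so the module below is
// identical to the previous dump.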
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
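// Annotation: this HAL pass attaches the target devices selected via compiler flags (here
// the llvm-cpu "embedded-elf-x86_64" target) to the module. The @__device_0 global and the
// #device_target_local attribute already carry that information, so the dump below appears
// unchanged from the previous one.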
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
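// Annotation: roughly, this pass materializes module-level device attributes into
// util.global device values; @__device_0 already exists as a global here, so the dump below
// matches the previous one.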
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
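// Annotation: roughly, this pass resolves promised device references (#hal.device.promise)
// to devices declared in the module; no promised devices appear in this IR, so nothing
// changes in the dump below.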
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
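// Annotation: device alias attributes (#hal.device.alias) would be expanded into concrete
// #hal.device.target specs by this pass; the local llvm-cpu target here is already concrete,
// so the dump below is unchanged.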
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
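// Annotation: a verification-only pass; it checks that each device global and
// #hal.device.affinity resolves to a registered target and leaves the IR untouched.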
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After CSE (cse) //----- //
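// Annotation: cse deduplicates identical side-effect-free operations. A hypothetical
// example (not from this log):
//   %c1   = arith.constant 1 : index
//   %c1_0 = arith.constant 1 : index   // duplicate of %c1
//   %sum  = arith.addi %c1, %c1_0 : index
// becomes, after cse, a single constant feeding both operands:
//   %c1  = arith.constant 1 : index
//   %sum = arith.addi %c1, %c1 : index
// In this module every constant and op is already unique, so the dump below matches the
// previous one.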
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
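  // @_compare_i64 splats 1 and 5 into two 8-byte i64 buffers via
  // @__builtin_splat_i64, fills two 1-byte buffers with the select operands
  // (1 and 0), runs the compare/select dispatch, and expects the exported
  // result to equal the 0-filled buffer since 1 != 5.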
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
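  // @__builtin_splat_i64 zero-extends its i32 operand to i64 and linalg.fill's
  // a dynamically-sized ?xi64 dispatch tensor (bound to a single element here).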
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result_0 : !stream.resource<transient>{%c8}
%5 = util.optimization_barrier %4 : !stream.resource<transient>
%result_2, %result_timepoint_3 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%6 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_3) => with(%result_2 as %arg0: !stream.resource<external>{%c1}) {
stream.cmd.fill %c0_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<external>{%c1}
} => !stream.timepoint
%7 = stream.timepoint.await %6 => %result_2 : !stream.resource<external>{%c1}
%8 = util.optimization_barrier %7 : !stream.resource<external>
%result_4, %result_timepoint_5 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c1} => !stream.timepoint
%9 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_5) => with(%result_4 as %arg0: !stream.resource<transient>{%c1}) {
stream.cmd.fill %c1_i8, %arg0[%c0 for %c1] : i8 -> !stream.resource<transient>{%c1}
} => !stream.timepoint
%10 = stream.timepoint.await %9 => %result_4 : !stream.resource<transient>{%c1}
%11 = util.optimization_barrier %10 : !stream.resource<transient>
%12 = stream.resource.size %2 : !stream.resource<transient>
%13 = stream.resource.size %5 : !stream.resource<transient>
%14 = stream.resource.size %11 : !stream.resource<transient>
%15 = stream.resource.size %8 : !stream.resource<external>
%result_6, %result_timepoint_7 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c1} => !stream.timepoint
%16 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_7) => with(%2 as %arg0: !stream.resource<transient>{%12}, %5 as %arg1: !stream.resource<transient>{%13}, %11 as %arg2: !stream.resource<transient>{%14}, %8 as %arg3: !stream.resource<external>{%15}, %result_6 as %arg4: !stream.resource<external>{%c1}) {
stream.cmd.dispatch @_compare_i64_dispatch_0::@_compare_i64_dispatch_0_elementwise {
ro %arg0[%c0 for %12] : !stream.resource<transient>{%12},
ro %arg1[%c0 for %13] : !stream.resource<transient>{%13},
ro %arg2[%c0 for %14] : !stream.resource<transient>{%14},
ro %arg3[%c0 for %15] : !stream.resource<external>{%15},
wo %arg4[%c0 for %c1] : !stream.resource<external>{%c1}
}
} => !stream.timepoint
%17 = stream.timepoint.await %16 => %result_6 : !stream.resource<external>{%c1}
%18 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %17 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
%19 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<i8> in !stream.resource<external>{%c1} -> tensor<i8>
check.expect_eq(%18, %19) : tensor<i8>
util.return
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "generic", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<() -> ()>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @__builtin_splat_i64 {
stream.executable.export public @__builtin_splat_i64 workgroups(%arg0: index) -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_dag_root %arg0
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @__builtin_splat_i64(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: i32) {
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = arith.extui %arg1 : i32 to i64
%1 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
%2 = tensor.empty() : tensor<1xi64>
%3 = linalg.fill ins(%0 : i64) outs(%2 : tensor<1xi64>) -> tensor<1xi64>
flow.dispatch.tensor.store %3, %1, offsets = [0], sizes = [1], strides = [1] : tensor<1xi64> -> !flow.dispatch.tensor<writeonly:tensor<?xi64>>{%c1}
return
}
}
}
util.func public @compare_i64() attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @compare_i64() -> ()"}} {
util.call @_compare_i64() : () -> ()
util.return
}
stream.executable private @_compare_i64_dispatch_0 {
stream.executable.export public @_compare_i64_dispatch_0_elementwise workgroups() -> (index, index, index) {
%c1 = arith.constant 1 : index
stream.return %c1, %c1, %c1 : index, index, index
}
builtin.module {
func.func @_compare_i64_dispatch_0_elementwise(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: !stream.binding {stream.alignment = 64 : index}) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i64>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<i8>>
%4 = stream.binding.subspan %arg4[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<i8>>
%5 = flow.dispatch.tensor.load %0, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%6 = flow.dispatch.tensor.load %1, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i64>> -> tensor<i64>
%7 = flow.dispatch.tensor.load %2, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%8 = flow.dispatch.tensor.load %3, offsets = [], sizes = [], strides = [] : !flow.dispatch.tensor<readonly:tensor<i8>> -> tensor<i8>
%9 = tensor.empty() : tensor<i8>
%10 = linalg.generic {indexing_maps = [#map, #map, #map, #map, #map], iterator_types = []} ins(%5, %6, %7, %8 : tensor<i64>, tensor<i64>, tensor<i8>, tensor<i8>) outs(%9 : tensor<i8>) {
^bb0(%in: i64, %in_0: i64, %in_1: i8, %in_2: i8, %out: i8):
%11 = arith.cmpi eq, %in, %in_0 : i64
%12 = arith.select %11, %in_1, %in_2 : i8
linalg.yield %12 : i8
} -> tensor<i8>
flow.dispatch.tensor.store %10, %4, offsets = [], sizes = [], strides = [] : tensor<i8> -> !flow.dispatch.tensor<writeonly:tensor<i8>>
return
}
}
}
util.func private @_compare_i64() {
%c5_i32 = arith.constant 5 : i32
%c1_i32 = arith.constant 1 : i32
%c8 = arith.constant 8 : index
%c0_i8 = arith.constant 0 : i8
%c0 = arith.constant 0 : index
%c1_i8 = arith.constant 1 : i8
%c1 = arith.constant 1 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%0 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%result as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c1_i32 : i32) {
wo %arg0[%c0 for %c8] : !stream.resource<transient>{%c8}
}
} => !stream.timepoint
%1 = stream.timepoint.await %0 => %result : !stream.resource<transient>{%c8}
%2 = util.optimization_barrier %1 : !stream.resource<transient>
%result_0, %result_timepoint_1 = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<transient>{%c8} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint_1) => with(%result_0 as %arg0: !stream.resource<transient>{%c8}) {
stream.cmd.dispatch @__builtin_splat_i64::@__builtin_splat_i64[%c1](%c5_i32 : i32