Skip to content

Instantly share code, notes, and snippets.

@pashu123
Created October 25, 2024 13:36
Show Gist options
  • Save pashu123/d77c63871e1657b7ffd70f38d0ea37d7 to your computer and use it in GitHub Desktop.
Save pashu123/d77c63871e1657b7ffd70f38d0ea37d7 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
// -----// IR Dump After AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
func.func @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c40896 = arith.constant 40896 : index
%c3720640 = arith.constant 3720640 : index
%c259584 = arith.constant 259584 : index
%c605184 = arith.constant 605184 : index
%c1297408 = arith.constant 1297408 : index
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After IREEImportPublicPass (iree-import-public) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c40896 = arith.constant 40896 : index
%c3720640 = arith.constant 3720640 : index
%c259584 = arith.constant 259584 : index
%c605184 = arith.constant 605184 : index
%c1297408 = arith.constant 1297408 : index
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After ImportMLProgramPass (iree-import-ml-program) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c40896 = arith.constant 40896 : index
%c3720640 = arith.constant 3720640 : index
%c259584 = arith.constant 259584 : index
%c605184 = arith.constant 605184 : index
%c1297408 = arith.constant 1297408 : index
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%c40896 = arith.constant 40896 : index
%c3720640 = arith.constant 3720640 : index
%c259584 = arith.constant 259584 : index
%c605184 = arith.constant 605184 : index
%c1297408 = arith.constant 1297408 : index
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = util.call @_test_dispatch(%0, %1, %2) : (tensor<1x2x128xf32>, tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
util.func private @_test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func private @_test_dispatch(%arg0: tensor<1x2x128xf32>, %arg1: tensor<1x2x48x30x30xf32>, %arg2: tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<1x2x128x26x26xf32>
%1 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x2x128xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%2 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg1, %arg2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%1 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%3 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%2 : tensor<1x2x128x26x26xf32>) outs(%0 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%4 = arith.cmpf ugt, %in, %cst : f32
%5 = arith.select %4, %in, %cst : f32
linalg.yield %5 : f32
} -> tensor<1x2x128x26x26xf32>
util.return %3 : tensor<1x2x128x26x26xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = util.call @_test_dispatch(%0, %1, %2) : (tensor<1x2x128xf32>, tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
module {
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {hal.device.targets = [#device_target_local]} {
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%8 = arith.cmpf ugt, %in, %cst : f32
%9 = arith.select %8, %in, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
}
// -----// IR Dump After ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%0 : tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<1x2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %4 : tensor<1x2x128x26x26xf32>, tensor<1x2x128x26x26xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8 : tensor<1x2x128x26x26xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%11 = arith.cmpf ugt, %in, %cst : f32
%12 = arith.select %11, %in, %cst : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
%10 = hal.tensor.export %9 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %10 : !hal.buffer_view
}
// -----// IR Dump After FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = tensor.empty() : tensor<2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%9 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%14 = arith.addf %in, %in_1 : f32
linalg.yield %14 : f32
} -> tensor<2x128x26x26xf32>
%11 = tensor.empty() : tensor<2x128x26x26xf32>
%12 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%10 : tensor<2x128x26x26xf32>) outs(%11 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%14 = arith.cmpf ugt, %in, %cst : f32
%15 = arith.select %14, %in, %cst : f32
linalg.yield %15 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %12 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%13 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
}
// -----// IR Dump After DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = tensor.empty() : tensor<2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%9 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%14 = arith.addf %in, %in_1 : f32
linalg.yield %14 : f32
} -> tensor<2x128x26x26xf32>
%11 = tensor.empty() : tensor<2x128x26x26xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%10 : tensor<2x128x26x26xf32>) outs(%11 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%14 = arith.cmpf ugt, %in, %cst : f32
%15 = arith.select %14, %in, %cst : f32
linalg.yield %15 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %12 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%13 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = tensor.empty() : tensor<2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%9 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%14 = arith.addf %in, %in_1 : f32
linalg.yield %14 : f32
} -> tensor<2x128x26x26xf32>
%11 = tensor.empty() : tensor<2x128x26x26xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%10 : tensor<2x128x26x26xf32>) outs(%11 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%14 = arith.cmpf ugt, %in, %cst : f32
%15 = arith.select %14, %in, %cst : f32
linalg.yield %15 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %12 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%13 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %13 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CPUMaterializeHostEncodingPass (iree-codegen-cpu-materialize-host-encoding) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After CSE (cse) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
}
// -----// IR Dump After FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed : tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
} -> tensor<2x128x26x26xf32>
%5 = tensor.empty() : tensor<1x2x128x26x26xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %7 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%3 : tensor<2x128x26x26xf32>) -> tensor<2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %4 : tensor<2x128x26x26xf32>, tensor<2x128x26x26xf32>) outs(%8 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%12 = arith.addf %in, %in_1 : f32
linalg.yield %12 : f32
} -> tensor<2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%9 : tensor<2x128x26x26xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %out: f32):
%12 = arith.cmpf ugt, %in, %cst : f32
%13 = arith.select %12, %in, %cst : f32
linalg.yield %13 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %10 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%11 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %11 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = tensor.empty() : tensor<1x2x128x26x26xf32>
%5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %6 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %collapsed : tensor<2x128x26x26xf32>, tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%9 = arith.addf %in, %in_1 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %7 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = tensor.empty() : tensor<1x2x128x26x26xf32>
%5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %6 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %collapsed : tensor<2x128x26x26xf32>, tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%9 = arith.addf %in, %in_1 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %7 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%collapsed = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<1x2x128xf32> into tensor<2x128xf32>
%3 = tensor.empty() : tensor<2x128x26x26xf32>
%4 = tensor.empty() : tensor<1x2x128x26x26xf32>
%5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%5 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%collapsed_0 = tensor.collapse_shape %6 [[0, 1], [2], [3], [4]] : tensor<1x2x128x26x26xf32> into tensor<2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%collapsed_0, %collapsed : tensor<2x128x26x26xf32>, tensor<2x128xf32>) outs(%3 : tensor<2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%9 = arith.addf %in, %in_1 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<2x128x26x26xf32>
%expanded = tensor.expand_shape %7 [[0, 1], [2], [3], [4]] output_shape [1, 2, 128, 26, 26] : tensor<2x128x26x26xf32> into tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %expanded "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = tensor.empty() : tensor<1x2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %7 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = tensor.empty() : tensor<1x2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %7 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = tensor.empty() : tensor<1x2x128x26x26xf32>
%7 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
%8 = hal.tensor.export %7 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%8 = arith.addf %in, %in_0 : f32
%9 = arith.cmpf ugt, %8, %cst : f32
%10 = arith.select %9, %8, %cst : f32
linalg.yield %10 : f32
} -> tensor<1x2x128x26x26xf32>
%7 = hal.tensor.export %6 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %7 : !hal.buffer_view
}
// -----// IR Dump After FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = flow.dispatch.region -> (tensor<1x2x128x26x26xf32>) {
%7 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%7, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%9 = arith.addf %in, %in_0 : f32
%10 = arith.cmpf ugt, %9, %cst : f32
%11 = arith.select %10, %9, %cst : f32
linalg.yield %11 : f32
} -> tensor<1x2x128x26x26xf32>
flow.return %8 : tensor<1x2x128x26x26xf32>
}
%6 = hal.tensor.export %5 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %6 : !hal.buffer_view
}
// -----// IR Dump After CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = flow.dispatch.region -> (tensor<1x2x128x26x26xf32>) {
%6 = tensor.empty() : tensor<1x2x128x26x26xf32>
%cst_0 = arith.constant 0.000000e+00 : f32
%7 = linalg.fill ins(%cst_0 : f32) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%10 = arith.addf %in, %in_1 : f32
%11 = arith.cmpf ugt, %10, %cst_0 : f32
%12 = arith.select %11, %10, %cst_0 : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
flow.return %9 : tensor<1x2x128x26x26xf32>
}
%5 = hal.tensor.export %4 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = flow.dispatch.region -> (tensor<1x2x128x26x26xf32>) {
%6 = tensor.empty() : tensor<1x2x128x26x26xf32>
%cst_0 = arith.constant 0.000000e+00 : f32
%7 = linalg.fill ins(%cst_0 : f32) outs(%6 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%8 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%1, %2 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%8, %0 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%6 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%10 = arith.addf %in, %in_1 : f32
%11 = arith.cmpf ugt, %10, %cst_0 : f32
%12 = arith.select %11, %10, %cst_0 : f32
linalg.yield %12 : f32
} -> tensor<1x2x128x26x26xf32>
flow.return %9 : tensor<1x2x128x26x26xf32>
}
%5 = hal.tensor.export %4 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%6 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%7 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%8 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%9 = tensor.empty() : tensor<1x2x128x26x26xf32>
%cst_0 = arith.constant 0.000000e+00 : f32
%10 = linalg.fill ins(%cst_0 : f32) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%6, %7 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%10 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%11, %8 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_1: f32, %out: f32):
%13 = arith.addf %in, %in_1 : f32
%14 = arith.cmpf ugt, %13, %cst_0 : f32
%15 = arith.select %14, %13, %cst_0 : f32
linalg.yield %15 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %12, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
}
%5 = hal.tensor.export %4 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch.workgroups(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32> =
(%arg3: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg4: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg5: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg6: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %arg4, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %arg5, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %arg6, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
flow.executable private @test_dispatch_dispatch_0 {
flow.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>, %arg2: !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>, %arg3: !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%2 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%3 = tensor.empty() : tensor<1x2x128x26x26xf32>
%4 = linalg.fill ins(%cst : f32) outs(%3 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%5 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%0, %1 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%4 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%6 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5, %2 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%3 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%7 = arith.addf %in, %in_0 : f32
%8 = arith.cmpf ugt, %7, %cst : f32
%9 = arith.select %8, %7, %cst : f32
linalg.yield %9 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %6, %arg3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x2x128xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x2x48x30x30xf32>
%2 = hal.tensor.import %arg2 "input2" : !hal.buffer_view -> tensor<2x128x48x5x5xf32>
%3 = flow.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1, %2, %0) : (tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>, tensor<1x2x128xf32>) -> tensor<1x2x128x26x26xf32>
%4 = hal.tensor.export %3 "output0" : tensor<1x2x128x26x26xf32> -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
%c128 = arith.constant 128 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c1_2 = arith.constant 1 : index
%c2_3 = arith.constant 2 : index
%c48 = arith.constant 48 : index
%c30 = arith.constant 30 : index
%c30_4 = arith.constant 30 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1_2, %c2_3, %c48, %c30, %c30_4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%element_type_f32_5 = hal.element_type<f32> : i32
%dense_row_major_6 = hal.encoding_type<dense_row_major> : i32
%c2_7 = arith.constant 2 : index
%c128_8 = arith.constant 128 : index
%c48_9 = arith.constant 48 : index
%c5 = arith.constant 5 : index
%c5_10 = arith.constant 5 : index
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2_7, %c128_8, %c48_9, %c5, %c5_10]) type(%element_type_f32_5) encoding(%dense_row_major_6)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%c0 = arith.constant 0 : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
%c128 = arith.constant 128 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c1_2 = arith.constant 1 : index
%c2_3 = arith.constant 2 : index
%c48 = arith.constant 48 : index
%c30 = arith.constant 30 : index
%c30_4 = arith.constant 30 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1_2, %c2_3, %c48, %c30, %c30_4]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%element_type_f32_5 = hal.element_type<f32> : i32
%dense_row_major_6 = hal.encoding_type<dense_row_major> : i32
%c2_7 = arith.constant 2 : index
%c128_8 = arith.constant 128 : index
%c48_9 = arith.constant 48 : index
%c5 = arith.constant 5 : index
%c5_10 = arith.constant 5 : index
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2_7, %c128_8, %c48_9, %c5, %c5_10]) type(%element_type_f32_5) encoding(%dense_row_major_6)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%c0 = arith.constant 0 : index
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%element_type_f32_2 = hal.element_type<f32> : i32
%dense_row_major_3 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32_2) encoding(%dense_row_major_3)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After Inliner (inline) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%element_type_f32_2 = hal.element_type<f32> : i32
%dense_row_major_3 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32_2) encoding(%dense_row_major_3)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
%element_type_f32_2 = hal.element_type<f32> : i32
%dense_row_major_3 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32_2) encoding(%dense_row_major_3)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After CombineInitializers (iree-util-combine-initializers) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128xf32> : index
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x48x30x30xf32> : index
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<2x128x48x5x5xf32> : index
%7 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%6}
%8 = stream.async.transfer %7 : !stream.resource<external>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%6}
%9 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x2x128x26x26xf32> : index
%10 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%5[%c0 to %3 for %3], %8[%c0 to %6 for %6], %2[%c0 to %0 for %0]) : (!stream.resource<*>{%3}, !stream.resource<*>{%6}, !stream.resource<*>{%0}) -> !stream.resource<*>{%9}
%11 = stream.async.transfer %10 : !stream.resource<*>{%9} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%9}
%12 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %11 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%9} -> !hal.buffer_view
util.return %12 : !hal.buffer_view
}
}
// -----// IR Dump After EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c1024} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c345600} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%5 = stream.async.transfer %4 : !stream.resource<external>{%c1228800} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c1228800}
%6 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%3[%c0 to %c345600 for %c345600], %5[%c0 to %c1228800 for %c1228800], %1[%c0 to %c1024 for %c1024]) : (!stream.resource<*>{%c345600}, !stream.resource<*>{%c1228800}, !stream.resource<*>{%c1024}) -> !stream.resource<*>{%c692224}
%7 = stream.async.transfer %6 : !stream.resource<*>{%c692224} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
// -----// IR Dump After RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
// Public ABI wrapper: validates the three caller-provided !hal.buffer_views
// (shape, f32 dtype, row-major encoding), imports them as external stream
// resources, invokes the fused conv dispatch, and exports the result view.
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
  // Byte sizes of the f32 buffers involved:
  //   1024    = 1*2*128*4        (bias, %arg0)
  //   345600  = 1*2*48*30*30*4   (image, %arg1)
  //   1228800 = 2*128*48*5*5*4   (filter, %arg2)
  //   692224  = 1*2*128*26*26*4  (conv result)
  %c692224 = arith.constant 692224 : index
  %c1228800 = arith.constant 1228800 : index
  %c345600 = arith.constant 345600 : index
  %c1024 = arith.constant 1024 : index
  %c0 = arith.constant 0 : index
  %c5 = arith.constant 5 : index
  %c30 = arith.constant 30 : index
  %c48 = arith.constant 48 : index
  %c128 = arith.constant 128 : index
  %c2 = arith.constant 2 : index
  %c1 = arith.constant 1 : index
  %element_type_f32 = hal.element_type<f32> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  // Runtime shape/dtype/layout checks, each paired with the import that
  // wraps the buffer view as a sized external stream resource.
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
  hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
  %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
  // Single fused dispatch; operand order is (image, filter, bias).
  %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after iree-util-apply-patterns: one stream executable holding the
// fused conv+bias+ReLU kernel, plus the public ABI entry point.
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @test_dispatch_dispatch_0 {
    // Workgroup count is still symbolic here; it is derived from the
    // dispatch slice later in the lowering pipeline.
    stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      // Bindings: %arg0 = image (1x2x48x30x30), %arg1 = filter
      // (2x128x48x5x5), %arg2 = bias (1x2x128), %arg3 = result
      // (1x2x128x26x26); all f32.
      func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
        %3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        %4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
        %5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
        // Zero-filled accumulator, then grouped 2-D convolution (NGCHW
        // image x GFCHW filter), stride 1, dilation 1: 30x30 -> 26x26.
        %7 = tensor.empty() : tensor<1x2x128x26x26xf32>
        %8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        %9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        // Fused epilogue: add the per-(n, g, f) bias (broadcast over both
        // spatial dims via #map1) then ReLU. `arith.cmpf ugt` is an
        // unordered compare, so a NaN sum selects %11 (NaN propagates).
        %10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %11 = arith.addf %in, %in_0 : f32
          %12 = arith.cmpf ugt, %11, %cst : f32
          %13 = arith.select %12, %11, %cst : f32
          linalg.yield %13 : f32
        } -> tensor<1x2x128x26x26xf32>
        flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        return
      }
    }
  }
  // Public ABI wrapper: asserts shapes/dtype/layout of the three input
  // buffer views, imports them as external stream resources (byte sizes:
  // 1024 = 1*2*128*4, 345600 = 1*2*48*30*30*4, 1228800 = 2*128*48*5*5*4),
  // runs the dispatch, and exports the 692224-byte (1*2*128*26*26*4) result.
  util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
    %c692224 = arith.constant 692224 : index
    %c1228800 = arith.constant 1228800 : index
    %c345600 = arith.constant 345600 : index
    %c1024 = arith.constant 1024 : index
    %c0 = arith.constant 0 : index
    %c5 = arith.constant 5 : index
    %c30 = arith.constant 30 : index
    %c48 = arith.constant 48 : index
    %c128 = arith.constant 128 : index
    %c2 = arith.constant 2 : index
    %c1 = arith.constant 1 : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
    hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
    %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after iree-util-fold-globals: IR is unchanged relative to the
// previous dump (nothing to fold); fused conv+bias+ReLU executable plus the
// public ABI entry point.
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @test_dispatch_dispatch_0 {
    // Workgroup count is still symbolic here; it is derived from the
    // dispatch slice later in the lowering pipeline.
    stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      // Bindings: %arg0 = image (1x2x48x30x30), %arg1 = filter
      // (2x128x48x5x5), %arg2 = bias (1x2x128), %arg3 = result
      // (1x2x128x26x26); all f32.
      func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
        %3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        %4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
        %5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
        // Zero-filled accumulator, then grouped 2-D convolution (NGCHW
        // image x GFCHW filter), stride 1, dilation 1: 30x30 -> 26x26.
        %7 = tensor.empty() : tensor<1x2x128x26x26xf32>
        %8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        %9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        // Fused epilogue: add the per-(n, g, f) bias (broadcast over both
        // spatial dims via #map1) then ReLU. `arith.cmpf ugt` is an
        // unordered compare, so a NaN sum selects %11 (NaN propagates).
        %10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %11 = arith.addf %in, %in_0 : f32
          %12 = arith.cmpf ugt, %11, %cst : f32
          %13 = arith.select %12, %11, %cst : f32
          linalg.yield %13 : f32
        } -> tensor<1x2x128x26x26xf32>
        flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        return
      }
    }
  }
  // Public ABI wrapper: asserts shapes/dtype/layout of the three input
  // buffer views, imports them as external stream resources (byte sizes:
  // 1024 = 1*2*128*4, 345600 = 1*2*48*30*30*4, 1228800 = 2*128*48*5*5*4),
  // runs the dispatch, and exports the 692224-byte (1*2*128*26*26*4) result.
  util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
    %c692224 = arith.constant 692224 : index
    %c1228800 = arith.constant 1228800 : index
    %c345600 = arith.constant 345600 : index
    %c1024 = arith.constant 1024 : index
    %c0 = arith.constant 0 : index
    %c5 = arith.constant 5 : index
    %c30 = arith.constant 30 : index
    %c48 = arith.constant 48 : index
    %c128 = arith.constant 128 : index
    %c2 = arith.constant 2 : index
    %c1 = arith.constant 1 : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
    hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
    %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after iree-util-fuse-globals: IR is unchanged relative to the
// previous dump (only one global, nothing to fuse); fused conv+bias+ReLU
// executable plus the public ABI entry point.
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @test_dispatch_dispatch_0 {
    // Workgroup count is still symbolic here; it is derived from the
    // dispatch slice later in the lowering pipeline.
    stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      // Bindings: %arg0 = image (1x2x48x30x30), %arg1 = filter
      // (2x128x48x5x5), %arg2 = bias (1x2x128), %arg3 = result
      // (1x2x128x26x26); all f32.
      func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
        %3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        %4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
        %5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
        // Zero-filled accumulator, then grouped 2-D convolution (NGCHW
        // image x GFCHW filter), stride 1, dilation 1: 30x30 -> 26x26.
        %7 = tensor.empty() : tensor<1x2x128x26x26xf32>
        %8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        %9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        // Fused epilogue: add the per-(n, g, f) bias (broadcast over both
        // spatial dims via #map1) then ReLU. `arith.cmpf ugt` is an
        // unordered compare, so a NaN sum selects %11 (NaN propagates).
        %10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %11 = arith.addf %in, %in_0 : f32
          %12 = arith.cmpf ugt, %11, %cst : f32
          %13 = arith.select %12, %11, %cst : f32
          linalg.yield %13 : f32
        } -> tensor<1x2x128x26x26xf32>
        flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        return
      }
    }
  }
  // Public ABI wrapper: asserts shapes/dtype/layout of the three input
  // buffer views, imports them as external stream resources (byte sizes:
  // 1024 = 1*2*128*4, 345600 = 1*2*48*30*30*4, 1228800 = 2*128*48*5*5*4),
  // runs the dispatch, and exports the 692224-byte (1*2*128*26*26*4) result.
  util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
    %c692224 = arith.constant 692224 : index
    %c1228800 = arith.constant 1228800 : index
    %c345600 = arith.constant 345600 : index
    %c1024 = arith.constant 1024 : index
    %c0 = arith.constant 0 : index
    %c5 = arith.constant 5 : index
    %c30 = arith.constant 30 : index
    %c48 = arith.constant 48 : index
    %c128 = arith.constant 128 : index
    %c2 = arith.constant 2 : index
    %c1 = arith.constant 1 : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
    hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
    %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after iree-util-ipo: IR is unchanged relative to the previous
// dump (single public function, no interprocedural opportunities); fused
// conv+bias+ReLU executable plus the public ABI entry point.
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @test_dispatch_dispatch_0 {
    // Workgroup count is still symbolic here; it is derived from the
    // dispatch slice later in the lowering pipeline.
    stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      // Bindings: %arg0 = image (1x2x48x30x30), %arg1 = filter
      // (2x128x48x5x5), %arg2 = bias (1x2x128), %arg3 = result
      // (1x2x128x26x26); all f32.
      func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
        %3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        %4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
        %5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
        // Zero-filled accumulator, then grouped 2-D convolution (NGCHW
        // image x GFCHW filter), stride 1, dilation 1: 30x30 -> 26x26.
        %7 = tensor.empty() : tensor<1x2x128x26x26xf32>
        %8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        %9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        // Fused epilogue: add the per-(n, g, f) bias (broadcast over both
        // spatial dims via #map1) then ReLU. `arith.cmpf ugt` is an
        // unordered compare, so a NaN sum selects %11 (NaN propagates).
        %10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %11 = arith.addf %in, %in_0 : f32
          %12 = arith.cmpf ugt, %11, %cst : f32
          %13 = arith.select %12, %11, %cst : f32
          linalg.yield %13 : f32
        } -> tensor<1x2x128x26x26xf32>
        flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        return
      }
    }
  }
  // Public ABI wrapper: asserts shapes/dtype/layout of the three input
  // buffer views, imports them as external stream resources (byte sizes:
  // 1024 = 1*2*128*4, 345600 = 1*2*48*30*30*4, 1228800 = 2*128*48*5*5*4),
  // runs the dispatch, and exports the 692224-byte (1*2*128*26*26*4) result.
  util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
    %c692224 = arith.constant 692224 : index
    %c1228800 = arith.constant 1228800 : index
    %c345600 = arith.constant 345600 : index
    %c1024 = arith.constant 1024 : index
    %c0 = arith.constant 0 : index
    %c5 = arith.constant 5 : index
    %c30 = arith.constant 30 : index
    %c48 = arith.constant 48 : index
    %c128 = arith.constant 128 : index
    %c2 = arith.constant 2 : index
    %c1 = arith.constant 1 : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
    hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
    %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
// Snapshot after iree-stream-verify-async-access-ranges: a verification-only
// pass, so the IR is unchanged relative to the previous dump; fused
// conv+bias+ReLU executable plus the public ABI entry point.
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_local
  stream.executable private @test_dispatch_dispatch_0 {
    // Workgroup count is still symbolic here; it is derived from the
    // dispatch slice later in the lowering pipeline.
    stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      // Bindings: %arg0 = image (1x2x48x30x30), %arg1 = filter
      // (2x128x48x5x5), %arg2 = bias (1x2x128), %arg3 = result
      // (1x2x128x26x26); all f32.
      func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
        %3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        %4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
        %5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
        %6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
        // Zero-filled accumulator, then grouped 2-D convolution (NGCHW
        // image x GFCHW filter), stride 1, dilation 1: 30x30 -> 26x26.
        %7 = tensor.empty() : tensor<1x2x128x26x26xf32>
        %8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        %9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
        // Fused epilogue: add the per-(n, g, f) bias (broadcast over both
        // spatial dims via #map1) then ReLU. `arith.cmpf ugt` is an
        // unordered compare, so a NaN sum selects %11 (NaN propagates).
        %10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
        ^bb0(%in: f32, %in_0: f32, %out: f32):
          %11 = arith.addf %in, %in_0 : f32
          %12 = arith.cmpf ugt, %11, %cst : f32
          %13 = arith.select %12, %11, %cst : f32
          linalg.yield %13 : f32
        } -> tensor<1x2x128x26x26xf32>
        flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
        return
      }
    }
  }
  // Public ABI wrapper: asserts shapes/dtype/layout of the three input
  // buffer views, imports them as external stream resources (byte sizes:
  // 1024 = 1*2*128*4, 345600 = 1*2*48*30*30*4, 1228800 = 2*128*48*5*5*4),
  // runs the dispatch, and exports the 692224-byte (1*2*128*26*26*4) result.
  util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
    %c692224 = arith.constant 692224 : index
    %c1228800 = arith.constant 1228800 : index
    %c345600 = arith.constant 345600 : index
    %c1024 = arith.constant 1024 : index
    %c0 = arith.constant 0 : index
    %c5 = arith.constant 5 : index
    %c30 = arith.constant 30 : index
    %c48 = arith.constant 48 : index
    %c128 = arith.constant 128 : index
    %c2 = arith.constant 2 : index
    %c1 = arith.constant 1 : index
    %element_type_f32 = hal.element_type<f32> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
    hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
    %2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
    %3 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%1[%c0 to %c345600 for %c345600], %2[%c0 to %c1228800 for %c1228800], %0[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump After ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.immediate => !stream.timepoint
%5 = stream.timepoint.immediate => !stream.timepoint
%6 = stream.timepoint.join max(%3, %4, %5) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%6) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%9 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %9 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%7 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.immediate => !stream.timepoint
%5 = stream.timepoint.immediate => !stream.timepoint
%6 = stream.timepoint.join max(%3, %4, %5) => !stream.timepoint
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%6) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%9 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %9 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%7 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%8 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %7 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %8 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224} {
%5 = stream.async.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg3[%c0 to %c345600 for %c345600], %arg4[%c0 to %c1228800 for %c1228800], %arg5[%c0 to %c1024 for %c1024]) : (!stream.resource<external>{%c345600}, !stream.resource<external>{%c1228800}, !stream.resource<external>{%c1024}) -> !stream.resource<external>{%c692224}
stream.yield %5 : !stream.resource<external>{%c692224}
} => !stream.timepoint
%3 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c692224}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump After ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After LayoutSlicesPass (iree-stream-layout-slices) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After PropagateSubranges (iree-util-propagate-subranges) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: !stream.binding, %arg4: index, %arg5: index, %arg6: index, %arg7: index) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%arg5] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%arg6] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%arg7] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0, %c0, %c0, %c0 : index, index, index, index) {
ro %arg3[%c0_0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0_0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0_0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}, %arg6: index {stream.values = [0 : index]}, %arg7: index {stream.values = [0 : index]}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%arg5] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%arg6] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%arg7] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0, %c0, %c0, %c0 : index, index, index, index) {
ro %arg3[%c0_0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0_0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0_0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After AnnotateDispatchAssumptionsPass (iree-stream-annotate-dispatch-assumptions) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}, %arg6: index {stream.values = [0 : index]}, %arg7: index {stream.values = [0 : index]}) {
%0:4 = util.assume.int
%arg4<umin = 0, umax = 0>,
%arg5<umin = 0, umax = 0>,
%arg6<umin = 0, umax = 0>,
%arg7<umin = 0, umax = 0>
: index, index, index, index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%1 = stream.binding.subspan %arg0[%0#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%2 = stream.binding.subspan %arg1[%0#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%3 = stream.binding.subspan %arg2[%0#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%4 = stream.binding.subspan %arg3[%0#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%7 = flow.dispatch.tensor.load %3, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%8 = tensor.empty() : tensor<1x2x128x26x26xf32>
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%5, %6 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%9 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%11 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%10, %7 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%12 = arith.addf %in, %in_0 : f32
%13 = arith.cmpf ugt, %12, %cst : f32
%14 = arith.select %13, %12, %cst : f32
linalg.yield %14 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %11, %4, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0, %c0, %c0, %c0 : index, index, index, index) {
ro %arg3[%c0_0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0_0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0_0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32) {
%0 = arith.extui %arg4 : i32 to i64
%1 = arith.extui %arg5 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg6 : i32 to i64
%6 = arith.extui %arg7 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg8 : i32 to i64
%11 = arith.extui %arg9 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg10 : i32 to i64
%16 = arith.extui %arg11 : i32 to i64
%c32_i64_2 = arith.constant 32 : i64
%17 = arith.shli %16, %c32_i64_2 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_3: f32, %out: f32):
%32 = arith.addf %in, %in_3 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i32_2 = arith.constant 0 : i32
%c0_i64_3 = arith.constant 0 : i64
%c0_i32_4 = arith.constant 0 : i32
%c32_i64_5 = arith.constant 32 : i64
%c0_i64_6 = arith.constant 0 : i64
%c0_i32_7 = arith.constant 0 : i32
%c0_i64_8 = arith.constant 0 : i64
%c0_i32_9 = arith.constant 0 : i32
%c32_i64_10 = arith.constant 32 : i64
%c0_i64_11 = arith.constant 0 : i64
%c0_i32_12 = arith.constant 0 : i32
%c0_i64_13 = arith.constant 0 : i64
%c0_i32_14 = arith.constant 0 : i32
%c32_i64_15 = arith.constant 32 : i64
%c0_i64_16 = arith.constant 0 : i64
%c0_i32_17 = arith.constant 0 : i32
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %c0_i32_9, %c0_i32_12, %c0_i32_14, %c0_i32_17 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0_0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0_0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0_0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0_0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg4 : i32 to i64
%1 = arith.extui %arg5 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg6 : i32 to i64
%6 = arith.extui %arg7 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg8 : i32 to i64
%11 = arith.extui %arg9 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg10 : i32 to i64
%16 = arith.extui %arg11 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%32 = arith.addf %in, %in_0 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg4 : i32 to i64
%1 = arith.extui %arg5 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg6 : i32 to i64
%6 = arith.extui %arg7 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg8 : i32 to i64
%11 = arith.extui %arg9 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg10 : i32 to i64
%16 = arith.extui %arg11 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%32 = arith.addf %in, %in_0 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg4 : i32 to i64
%1 = arith.extui %arg5 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg6 : i32 to i64
%6 = arith.extui %arg7 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg8 : i32 to i64
%11 = arith.extui %arg9 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg10 : i32 to i64
%16 = arith.extui %arg11 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%32 = arith.addf %in, %in_0 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32, %arg9: i32, %arg10: i32, %arg11: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg4 : i32 to i64
%1 = arith.extui %arg5 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg6 : i32 to i64
%6 = arith.extui %arg7 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg8 : i32 to i64
%11 = arith.extui %arg9 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %arg10 : i32 to i64
%16 = arith.extui %arg11 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%32 = arith.addf %in, %in_0 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32, i32, i32) {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%c0_i32 = arith.constant 0 : i32
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %c0_i32 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %c0_i32 : i32 to i64
%11 = arith.extui %c0_i32 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = arith.extui %c0_i32 : i32 to i64
%16 = arith.extui %c0_i32 : i32 to i64
%17 = arith.shli %16, %c32_i64 : i64
%18 = arith.ori %15, %17 : i64
%19 = arith.index_castui %18 {stream.values = [0 : index]} : i64 to index
%20:4 = util.assume.int
%4<umin = 0, umax = 0>,
%9<umin = 0, umax = 0>,
%14<umin = 0, umax = 0>,
%19<umin = 0, umax = 0>
: index, index, index, index
%21 = stream.binding.subspan %arg0[%20#0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%22 = stream.binding.subspan %arg1[%20#1] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%23 = stream.binding.subspan %arg2[%20#2] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%24 = stream.binding.subspan %arg3[%20#3] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%25 = flow.dispatch.tensor.load %21, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%26 = flow.dispatch.tensor.load %22, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%27 = flow.dispatch.tensor.load %23, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%28 = tensor.empty() : tensor<1x2x128x26x26xf32>
%29 = linalg.fill ins(%cst : f32) outs(%28 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%30 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%25, %26 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%29 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%31 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%30, %27 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%28 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%32 = arith.addf %in, %in_0 : f32
%33 = arith.cmpf ugt, %32, %cst : f32
%34 = arith.select %33, %32, %cst : f32
linalg.yield %34 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %31, %24, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After CSE (cse) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After OptimizeIntArithmetic (iree-util-optimize-int-arithmetic) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
// -----// IR Dump After ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After IPO (iree-util-ipo) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After SymbolDCE (symbol-dce) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0, 0], sizes = [2, 128, 48, 5, 5], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>> -> tensor<2x128x48x5x5xf32>
%6 = flow.dispatch.tensor.load %2, offsets = [0, 0, 0], sizes = [1, 2, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>> -> tensor<1x2x128xf32>
%7 = tensor.empty() : tensor<1x2x128x26x26xf32>
%8 = linalg.fill ins(%cst : f32) outs(%7 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%9 = linalg.conv_2d_ngchw_gfchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%4, %5 : tensor<1x2x48x30x30xf32>, tensor<2x128x48x5x5xf32>) outs(%8 : tensor<1x2x128x26x26xf32>) -> tensor<1x2x128x26x26xf32>
%10 = linalg.generic {indexing_maps = [#map, #map1, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%9, %6 : tensor<1x2x128x26x26xf32>, tensor<1x2x128xf32>) outs(%7 : tensor<1x2x128x26x26xf32>) {
^bb0(%in: f32, %in_0: f32, %out: f32):
%11 = arith.addf %in, %in_0 : f32
%12 = arith.cmpf ugt, %11, %cst : f32
%13 = arith.select %12, %11, %cst : f32
linalg.yield %13 : f32
} -> tensor<1x2x128x26x26xf32>
flow.dispatch.tensor.store %10, %3, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 128, 26, 26], strides = [1, 1, 1, 1, 1] : tensor<1x2x128x26x26xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
return
}
}
}
util.func public @test_dispatch(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @test_dispatch(%input0: tensor<1x2x128xf32>, %input1: tensor<1x2x48x30x30xf32>, %input2: tensor<2x128x48x5x5xf32>) -> (%output0: tensor<1x2x128x26x26xf32>)"}} {
%c692224 = arith.constant 692224 : index
%c1228800 = arith.constant 1228800 : index
%c345600 = arith.constant 345600 : index
%c1024 = arith.constant 1024 : index
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c30 = arith.constant 30 : index
%c48 = arith.constant 48 : index
%c128 = arith.constant 128 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c2, %c128]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x2x128xf32> in !stream.resource<external>{%c1024}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c2, %c48, %c30, %c30]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x2x48x30x30xf32> in !stream.resource<external>{%c345600}
hal.buffer_view.assert<%arg2 : !hal.buffer_view> message("input2") shape([%c2, %c128, %c48, %c5, %c5]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg2 : !hal.buffer_view -> tensor<2x128x48x5x5xf32> in !stream.resource<external>{%c1228800}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c692224} => !stream.timepoint
%3 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%1 as %arg3: !stream.resource<external>{%c345600}, %2 as %arg4: !stream.resource<external>{%c1228800}, %0 as %arg5: !stream.resource<external>{%c1024}, %result as %arg6: !stream.resource<external>{%c692224}) {
stream.cmd.dispatch @test_dispatch_dispatch_0::@test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 {
ro %arg3[%c0 for %c345600] : !stream.resource<external>{%c345600},
ro %arg4[%c0 for %c1228800] : !stream.resource<external>{%c1228800},
ro %arg5[%c0 for %c1024] : !stream.resource<external>{%c1024},
wo %arg6[%c0 for %c692224] : !stream.resource<external>{%c692224}
}
} => !stream.timepoint
%4 = stream.timepoint.await %3 => %result : !stream.resource<external>{%c692224}
%5 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %4 : tensor<1x2x128x26x26xf32> in !stream.resource<external>{%c692224} -> !hal.buffer_view
util.return %5 : !hal.buffer_view
}
}
// -----// IR Dump After AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
#executable_target_embedded_elf_x86_64_ = #hal.executable.target<"llvm-cpu", "embedded-elf-x86_64", {cpu = "", cpu_features = "", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", native_vector_size = 16 : i64, target_triple = "x86_64-unknown-unknown-eabi-elf"}>
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#device_target_local = #hal.device.target<"local", [#executable_target_embedded_elf_x86_64_]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_local
stream.executable private @test_dispatch_dispatch_0 {
stream.executable.export public @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @test_dispatch_dispatch_0_conv_2d_ngchw_gfchw_1x2x128x26x26x48x5x5_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<2x128x48x5x5xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x2x128xf32>>
%3 = stream.binding.subspan %arg3[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x2x128x26x26xf32>>
%4 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0, 0], sizes = [1, 2, 48, 30, 30], strides = [1, 1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x2x48x30x30xf32>> -> tensor<1x2x48x30x30xf32>
%5 = f
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment