Const prop
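Four snapshots of one quantized matmul module (@aqt_matmul) as it moves through IREE's constant-propagation pipeline: the imported mhlo/iree_input program, its lowering to linalg on tensors, the result of greedily hoisting constant-evaluable subgraphs into globals, and finally the hoisted globals JIT-evaluated down to inline constants.

For orientation, here is a minimal NumPy sketch of what @main in the first listing computes; the function and variable names are illustrative, not taken from the gist:

import numpy as np

def aqt_matmul(x, w, input_scale):
    # Quantize activations with the fixed scale parameter; floor(v + 0.5) is
    # round-half-up, and the result is clipped into the int8 range [-127, 127].
    q_x = np.clip(np.floor(x * input_scale + 0.5), -127, 127).astype(np.int8)
    # Derive the weight scale from the weights themselves: 127 / max(|w|).
    w_scale = 127.0 / np.max(np.abs(w))
    q_w = np.floor(w * w_scale + 0.5).astype(np.int8)
    # Integer matmul accumulated in i32, then dequantize by the scale product.
    acc = q_x.astype(np.int32) @ q_w.astype(np.int32)
    return acc.astype(np.float32) / (input_scale * w_scale)

Everything on the weight side of this function (w_scale, q_w, and the scale product) depends only on constants, which is exactly what the passes below hoist and fold.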
module @aqt_matmul {
  iree_input.global private @_params$0 = dense<[[0.000000e+00, 5.003000e+02, 1.000600e+03], [1500.8999, 2.001200e+03, 2.501500e+03], [3001.7998, 3502.09985, 4.002400e+03], [4502.69971, 5.003000e+03, 5.503300e+03], [6003.59961, 6503.8999, 7004.1997], [7.504500e+03, 8004.7998, 8.505100e+03]]> : tensor<6x3xf32>
  iree_input.global private @_params$1 = dense<5.000000e+00> : tensor<f32>
  func @compute_native(%arg0: tensor<5x6xf32>) -> tensor<5x3xf32> {
    %0 = iree_input.global.load @_params$0 : tensor<6x3xf32>
    %1 = iree_input.global.load @_params$1 : tensor<f32>
    %2 = call @main(%0, %1, %arg0) : (tensor<6x3xf32>, tensor<f32>, tensor<5x6xf32>) -> tensor<5x3xf32>
    return %2 : tensor<5x3xf32>
  }
  func private @main(%arg0: tensor<6x3xf32>, %arg1: tensor<f32>, %arg2: tensor<5x6xf32>) -> tensor<5x3xf32> {
    %0 = mhlo.constant dense<5.000000e-01> : tensor<6x3xf32>
    %1 = mhlo.constant dense<1.270000e+02> : tensor<f32>
    %2 = mhlo.constant dense<0xFF800000> : tensor<f32>
    %3 = mhlo.constant dense<127> : tensor<i32>
    %4 = mhlo.constant dense<-127> : tensor<i32>
    %5 = mhlo.constant dense<5.000000e-01> : tensor<5x6xf32>
    %6 = "mhlo.broadcast_in_dim"(%arg1) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<5x6xf32>
    %7 = mhlo.multiply %arg2, %6 : tensor<5x6xf32>
    %8 = mhlo.add %7, %5 : tensor<5x6xf32>
    %9 = "mhlo.floor"(%8) : (tensor<5x6xf32>) -> tensor<5x6xf32>
    %10 = call @jit_clip(%9, %4, %3) : (tensor<5x6xf32>, tensor<i32>, tensor<i32>) -> tensor<5x6xf32>
    %11 = "mhlo.convert"(%10) : (tensor<5x6xf32>) -> tensor<5x6xi8>
    %12 = "mhlo.abs"(%arg0) : (tensor<6x3xf32>) -> tensor<6x3xf32>
    %13 = mhlo.reduce %12, %2 ( {
    ^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):  // no predecessors
      %25 = mhlo.maximum %arg3, %arg4 : tensor<f32>
      "mhlo.return"(%25) : (tensor<f32>) -> ()
    }) {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<6x3xf32>, tensor<f32>) -> tensor<f32>
    %14 = mhlo.divide %1, %13 : tensor<f32>
    %15 = "mhlo.broadcast_in_dim"(%14) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<6x3xf32>
    %16 = mhlo.multiply %arg0, %15 : tensor<6x3xf32>
    %17 = mhlo.add %16, %0 : tensor<6x3xf32>
    %18 = "mhlo.floor"(%17) : (tensor<6x3xf32>) -> tensor<6x3xf32>
    %19 = "mhlo.convert"(%18) : (tensor<6x3xf32>) -> tensor<6x3xi8>
    %20 = "mhlo.dot_general"(%11, %19) {dot_dimension_numbers = #mhlo.dot<lhs_contracting_dimensions = [1], rhs_contracting_dimensions = [0]>, precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<5x6xi8>, tensor<6x3xi8>) -> tensor<5x3xi32>
    %21 = mhlo.multiply %arg1, %14 : tensor<f32>
    %22 = "mhlo.convert"(%20) : (tensor<5x3xi32>) -> tensor<5x3xf32>
    %23 = "mhlo.broadcast_in_dim"(%21) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<5x3xf32>
    %24 = mhlo.divide %22, %23 : tensor<5x3xf32>
    return %24 : tensor<5x3xf32>
  }
  func private @jit_clip(%arg0: tensor<5x6xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<5x6xf32> {
    %0 = "mhlo.convert"(%arg1) : (tensor<i32>) -> tensor<f32>
    %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<5x6xf32>
    %2 = mhlo.maximum %1, %arg0 : tensor<5x6xf32>
    %3 = "mhlo.convert"(%arg2) : (tensor<i32>) -> tensor<f32>
    %4 = "mhlo.broadcast_in_dim"(%3) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<5x6xf32>
    %5 = mhlo.minimum %4, %2 : tensor<5x6xf32>
    return %5 : tensor<5x6xf32>
  }
}
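The same module lowered to linalg on tensors: each mhlo op has become a linalg.generic (with linalg.matmul for the dot), and scalar broadcasts are generics with a rank-0 indexing map. At this level the whole weight-quantization chain (%14 through %30, plus the scale product %35) depends only on the two globals, making it a pure function of constants and a candidate for hoisting.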
#map0 = affine_map<(d0, d1) -> ()>
#map1 = affine_map<(d0, d1) -> (d0, d1)>
#map2 = affine_map<() -> ()>
module @aqt_matmul {
  util.global private @_params$0 = dense<[[0.000000e+00, 5.003000e+02, 1.000600e+03], [1500.8999, 2.001200e+03, 2.501500e+03], [3001.7998, 3502.09985, 4.002400e+03], [4502.69971, 5.003000e+03, 5.503300e+03], [6003.59961, 6503.8999, 7004.1997], [7.504500e+03, 8004.7998, 8.505100e+03]]> : tensor<6x3xf32>
  util.global private @_params$1 = dense<5.000000e+00> : tensor<f32>
  func @compute_native(%arg0: tensor<5x6xf32>) -> tensor<5x3xf32> {
    %c0_i32 = arith.constant 0 : i32
    %cst = arith.constant 0xFF800000 : f32
    %cst_0 = arith.constant dense<5.000000e-01> : tensor<6x3xf32>
    %cst_1 = arith.constant dense<5.000000e-01> : tensor<5x6xf32>
    %cst_2 = arith.constant dense<-1.270000e+02> : tensor<5x6xf32>
    %cst_3 = arith.constant dense<1.270000e+02> : tensor<f32>
    %cst_4 = arith.constant dense<1.270000e+02> : tensor<5x6xf32>
    %_params$0 = util.global.load @_params$0 : tensor<6x3xf32>
    %_params$1 = util.global.load @_params$1 : tensor<f32>
    %0 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %1 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$1 : tensor<f32>) outs(%0 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x6xf32>
    %2 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %3 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%arg0, %1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%2 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.mulf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<5x6xf32>
    %4 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%3, %cst_1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%4 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.addf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<5x6xf32>
    %6 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %7 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%5 : tensor<5x6xf32>) outs(%6 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %42 = math.floor %arg1 : f32
      linalg.yield %42 : f32
    } -> tensor<5x6xf32>
    %8 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%7, %cst_2 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%8 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.maxf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<5x6xf32>
    %10 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %11 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%9, %cst_4 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%10 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.minf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<5x6xf32>
    %12 = linalg.init_tensor [5, 6] : tensor<5x6xi8>
    %13 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%11 : tensor<5x6xf32>) outs(%12 : tensor<5x6xi8>) {
    ^bb0(%arg1: f32, %arg2: i8):  // no predecessors
      %42 = arith.fptosi %arg1 : f32 to i8
      linalg.yield %42 : i8
    } -> tensor<5x6xi8>
    %14 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %15 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$0 : tensor<6x3xf32>) outs(%14 : tensor<6x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %42 = math.abs %arg1 : f32
      linalg.yield %42 : f32
    } -> tensor<6x3xf32>
    %16 = linalg.init_tensor [] : tensor<f32>
    %17 = linalg.fill(%cst, %16) : f32, tensor<f32> -> tensor<f32>
    %18 = linalg.generic {indexing_maps = [#map1, #map0], iterator_types = ["reduction", "reduction"]} ins(%15 : tensor<6x3xf32>) outs(%17 : tensor<f32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %42 = arith.maxf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<f32>
    %19 = linalg.init_tensor [] : tensor<f32>
    %20 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = []} ins(%cst_3, %18 : tensor<f32>, tensor<f32>) outs(%19 : tensor<f32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.divf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<f32>
    %21 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %22 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%20 : tensor<f32>) outs(%21 : tensor<6x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<6x3xf32>
    %23 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %24 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$0, %22 : tensor<6x3xf32>, tensor<6x3xf32>) outs(%23 : tensor<6x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.mulf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<6x3xf32>
    %25 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %26 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%24, %cst_0 : tensor<6x3xf32>, tensor<6x3xf32>) outs(%25 : tensor<6x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.addf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<6x3xf32>
    %27 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %28 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%26 : tensor<6x3xf32>) outs(%27 : tensor<6x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %42 = math.floor %arg1 : f32
      linalg.yield %42 : f32
    } -> tensor<6x3xf32>
    %29 = linalg.init_tensor [6, 3] : tensor<6x3xi8>
    %30 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%28 : tensor<6x3xf32>) outs(%29 : tensor<6x3xi8>) {
    ^bb0(%arg1: f32, %arg2: i8):  // no predecessors
      %42 = arith.fptosi %arg1 : f32 to i8
      linalg.yield %42 : i8
    } -> tensor<6x3xi8>
    %31 = linalg.init_tensor [5, 3] : tensor<5x3xi32>
    %32 = linalg.fill(%c0_i32, %31) : i32, tensor<5x3xi32> -> tensor<5x3xi32>
    %33 = linalg.matmul ins(%13, %30 : tensor<5x6xi8>, tensor<6x3xi8>) outs(%32 : tensor<5x3xi32>) -> tensor<5x3xi32>
    %34 = linalg.init_tensor [] : tensor<f32>
    %35 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = []} ins(%_params$1, %20 : tensor<f32>, tensor<f32>) outs(%34 : tensor<f32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.mulf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<f32>
    %36 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %37 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%33 : tensor<5x3xi32>) outs(%36 : tensor<5x3xf32>) {
    ^bb0(%arg1: i32, %arg2: f32):  // no predecessors
      %42 = arith.sitofp %arg1 : i32 to f32
      linalg.yield %42 : f32
    } -> tensor<5x3xf32>
    %38 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %39 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%35 : tensor<f32>) outs(%38 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x3xf32>
    %40 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %41 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%37, %39 : tensor<5x3xf32>, tensor<5x3xf32>) outs(%40 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %42 = arith.divf %arg1, %arg2 : f32
      linalg.yield %42 : f32
    } -> tensor<5x3xf32>
    return %41 : tensor<5x3xf32>
  }
}
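After hoisting (presumably the --iree-greedy-hoist-into-globals pass named in the command on the final listing), the constant-evaluable subgraphs have moved out of @compute_native into three new globals, each computed once by a util.initializer region: @hoisted (the weight scale, 127 / max|W|), @hoisted_0 (the quantized int8 weights), and @hoisted_1 (the product of the activation and weight scales).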
#map0 = affine_map<(d0, d1) -> ()>
#map1 = affine_map<(d0, d1) -> (d0, d1)>
#map2 = affine_map<() -> ()>
module @aqt_matmul {
  util.global private @_params$0 = dense<[[0.000000e+00, 5.003000e+02, 1.000600e+03], [1500.8999, 2.001200e+03, 2.501500e+03], [3001.7998, 3502.09985, 4.002400e+03], [4502.69971, 5.003000e+03, 5.503300e+03], [6003.59961, 6503.8999, 7004.1997], [7.504500e+03, 8004.7998, 8.505100e+03]]> : tensor<6x3xf32>
  util.global private @_params$1 = dense<5.000000e+00> : tensor<f32>
  func @compute_native(%arg0: tensor<5x6xf32>) -> tensor<5x3xf32> {
    %c0_i32 = arith.constant 0 : i32
    %cst = arith.constant dense<5.000000e-01> : tensor<5x6xf32>
    %cst_0 = arith.constant dense<-1.270000e+02> : tensor<5x6xf32>
    %cst_1 = arith.constant dense<1.270000e+02> : tensor<5x6xf32>
    %_params$1 = util.global.load @_params$1 : tensor<f32>
    %0 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %1 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$1 : tensor<f32>) outs(%0 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x6xf32>
    %2 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %3 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%arg0, %1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%2 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.mulf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %4 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%3, %cst : tensor<5x6xf32>, tensor<5x6xf32>) outs(%4 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.addf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %6 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %7 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%5 : tensor<5x6xf32>) outs(%6 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %23 = math.floor %arg1 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %8 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%7, %cst_0 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%8 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.maxf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %10 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %11 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%9, %cst_1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%10 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.minf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %12 = linalg.init_tensor [5, 6] : tensor<5x6xi8>
    %13 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%11 : tensor<5x6xf32>) outs(%12 : tensor<5x6xi8>) {
    ^bb0(%arg1: f32, %arg2: i8):  // no predecessors
      %23 = arith.fptosi %arg1 : f32 to i8
      linalg.yield %23 : i8
    } -> tensor<5x6xi8>
    %hoisted = util.global.load @hoisted : tensor<f32>
    %14 = linalg.init_tensor [5, 3] : tensor<5x3xi32>
    %15 = linalg.fill(%c0_i32, %14) : i32, tensor<5x3xi32> -> tensor<5x3xi32>
    %hoisted_0 = util.global.load @hoisted_0 : tensor<6x3xi8>
    %16 = linalg.matmul ins(%13, %hoisted_0 : tensor<5x6xi8>, tensor<6x3xi8>) outs(%15 : tensor<5x3xi32>) -> tensor<5x3xi32>
    %17 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %18 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<5x3xi32>) outs(%17 : tensor<5x3xf32>) {
    ^bb0(%arg1: i32, %arg2: f32):  // no predecessors
      %23 = arith.sitofp %arg1 : i32 to f32
      linalg.yield %23 : f32
    } -> tensor<5x3xf32>
    %19 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %hoisted_1 = util.global.load @hoisted_1 : tensor<f32>
    %20 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%hoisted_1 : tensor<f32>) outs(%19 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x3xf32>
    %21 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %22 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%18, %20 : tensor<5x3xf32>, tensor<5x3xf32>) outs(%21 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.divf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x3xf32>
    return %22 : tensor<5x3xf32>
  }
  util.global private @hoisted : tensor<f32>
  util.initializer {
    %cst = arith.constant dense<1.270000e+02> : tensor<f32>
    %_params$0 = util.global.load @_params$0 : tensor<6x3xf32>
    %0 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %1 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$0 : tensor<6x3xf32>) outs(%0 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      %7 = math.abs %arg0 : f32
      linalg.yield %7 : f32
    } -> tensor<6x3xf32>
    %cst_0 = arith.constant 0xFF800000 : f32
    %2 = linalg.init_tensor [] : tensor<f32>
    %3 = linalg.fill(%cst_0, %2) : f32, tensor<f32> -> tensor<f32>
    %4 = linalg.generic {indexing_maps = [#map1, #map0], iterator_types = ["reduction", "reduction"]} ins(%1 : tensor<6x3xf32>) outs(%3 : tensor<f32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      %7 = arith.maxf %arg0, %arg1 : f32
      linalg.yield %7 : f32
    } -> tensor<f32>
    %5 = linalg.init_tensor [] : tensor<f32>
    %6 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = []} ins(%cst, %4 : tensor<f32>, tensor<f32>) outs(%5 : tensor<f32>) {
    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):  // no predecessors
      %7 = arith.divf %arg0, %arg1 : f32
      linalg.yield %7 : f32
    } -> tensor<f32>
    util.global.store %6, @hoisted : tensor<f32>
    util.initializer.return
  }
  util.global private @hoisted_0 : tensor<6x3xi8>
  util.initializer {
    %_params$0 = util.global.load @_params$0 : tensor<6x3xf32>
    %hoisted = util.global.load @hoisted : tensor<f32>
    %0 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %1 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%hoisted : tensor<f32>) outs(%0 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      linalg.yield %arg0 : f32
    } -> tensor<6x3xf32>
    %2 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %3 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$0, %1 : tensor<6x3xf32>, tensor<6x3xf32>) outs(%2 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):  // no predecessors
      %10 = arith.mulf %arg0, %arg1 : f32
      linalg.yield %10 : f32
    } -> tensor<6x3xf32>
    %cst = arith.constant dense<5.000000e-01> : tensor<6x3xf32>
    %4 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%3, %cst : tensor<6x3xf32>, tensor<6x3xf32>) outs(%4 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):  // no predecessors
      %10 = arith.addf %arg0, %arg1 : f32
      linalg.yield %10 : f32
    } -> tensor<6x3xf32>
    %6 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %7 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%5 : tensor<6x3xf32>) outs(%6 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      %10 = math.floor %arg0 : f32
      linalg.yield %10 : f32
    } -> tensor<6x3xf32>
    %8 = linalg.init_tensor [6, 3] : tensor<6x3xi8>
    %9 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%7 : tensor<6x3xf32>) outs(%8 : tensor<6x3xi8>) {
    ^bb0(%arg0: f32, %arg1: i8):  // no predecessors
      %10 = arith.fptosi %arg0 : f32 to i8
      linalg.yield %10 : i8
    } -> tensor<6x3xi8>
    util.global.store %9, @hoisted_0 : tensor<6x3xi8>
    util.initializer.return
  }
  util.global private @hoisted_1 : tensor<f32>
  util.initializer {
    %_params$1 = util.global.load @_params$1 : tensor<f32>
    %cst = arith.constant dense<1.270000e+02> : tensor<f32>
    %_params$0 = util.global.load @_params$0 : tensor<6x3xf32>
    %0 = linalg.init_tensor [6, 3] : tensor<6x3xf32>
    %1 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$0 : tensor<6x3xf32>) outs(%0 : tensor<6x3xf32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      %8 = math.abs %arg0 : f32
      linalg.yield %8 : f32
    } -> tensor<6x3xf32>
    %cst_0 = arith.constant 0xFF800000 : f32
    %2 = linalg.init_tensor [] : tensor<f32>
    %3 = linalg.fill(%cst_0, %2) : f32, tensor<f32> -> tensor<f32>
    %4 = linalg.generic {indexing_maps = [#map1, #map0], iterator_types = ["reduction", "reduction"]} ins(%1 : tensor<6x3xf32>) outs(%3 : tensor<f32>) {
    ^bb0(%arg0: f32, %arg1: f32):  // no predecessors
      %8 = arith.maxf %arg0, %arg1 : f32
      linalg.yield %8 : f32
    } -> tensor<f32>
    %5 = linalg.init_tensor [] : tensor<f32>
    %hoisted = util.global.load @hoisted : tensor<f32>
    %6 = linalg.init_tensor [] : tensor<f32>
    %7 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = []} ins(%_params$1, %hoisted : tensor<f32>, tensor<f32>) outs(%6 : tensor<f32>) {
    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):  // no predecessors
      %8 = arith.mulf %arg0, %arg1 : f32
      linalg.yield %8 : f32
    } -> tensor<f32>
    util.global.store %7, @hoisted_1 : tensor<f32>
    util.initializer.return
  }
}
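Finally, --iree-jiteval-globals evaluates the initializers at compile time and replaces them with plain constants. The folded values are what the math predicts: @hoisted = 127 / 8505.1 ≈ 0.014932218, @hoisted_0 holds the rounded int8 weights (0 through 127), and @hoisted_1 = 5.0 × 0.014932218 ≈ 0.074661091.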
// iree-opt --iree-greedy-hoist-into-globals --iree-jiteval-globals ~/scratch/numopt/aqt_matmul_native_input.mlir
#map0 = affine_map<(d0, d1) -> ()>
#map1 = affine_map<(d0, d1) -> (d0, d1)>
module @aqt_matmul {
  util.global private @_params$0 = dense<[[0.000000e+00, 5.003000e+02, 1.000600e+03], [1500.8999, 2.001200e+03, 2.501500e+03], [3001.7998, 3502.09985, 4.002400e+03], [4502.69971, 5.003000e+03, 5.503300e+03], [6003.59961, 6503.8999, 7004.1997], [7.504500e+03, 8004.7998, 8.505100e+03]]> : tensor<6x3xf32>
  util.global private @_params$1 = dense<5.000000e+00> : tensor<f32>
  func @compute_native(%arg0: tensor<5x6xf32>) -> tensor<5x3xf32> {
    %c0_i32 = arith.constant 0 : i32
    %cst = arith.constant dense<5.000000e-01> : tensor<5x6xf32>
    %cst_0 = arith.constant dense<-1.270000e+02> : tensor<5x6xf32>
    %cst_1 = arith.constant dense<1.270000e+02> : tensor<5x6xf32>
    %_params$1 = util.global.load @_params$1 : tensor<f32>
    %0 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %1 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%_params$1 : tensor<f32>) outs(%0 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x6xf32>
    %2 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %3 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%arg0, %1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%2 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.mulf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %4 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %5 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%3, %cst : tensor<5x6xf32>, tensor<5x6xf32>) outs(%4 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.addf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %6 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %7 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%5 : tensor<5x6xf32>) outs(%6 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      %23 = math.floor %arg1 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %8 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %9 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%7, %cst_0 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%8 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.maxf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %10 = linalg.init_tensor [5, 6] : tensor<5x6xf32>
    %11 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%9, %cst_1 : tensor<5x6xf32>, tensor<5x6xf32>) outs(%10 : tensor<5x6xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.minf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x6xf32>
    %12 = linalg.init_tensor [5, 6] : tensor<5x6xi8>
    %13 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%11 : tensor<5x6xf32>) outs(%12 : tensor<5x6xi8>) {
    ^bb0(%arg1: f32, %arg2: i8):  // no predecessors
      %23 = arith.fptosi %arg1 : f32 to i8
      linalg.yield %23 : i8
    } -> tensor<5x6xi8>
    %hoisted = util.global.load @hoisted : tensor<f32>
    %14 = linalg.init_tensor [5, 3] : tensor<5x3xi32>
    %15 = linalg.fill(%c0_i32, %14) : i32, tensor<5x3xi32> -> tensor<5x3xi32>
    %hoisted_0 = util.global.load @hoisted_0 : tensor<6x3xi8>
    %16 = linalg.matmul ins(%13, %hoisted_0 : tensor<5x6xi8>, tensor<6x3xi8>) outs(%15 : tensor<5x3xi32>) -> tensor<5x3xi32>
    %17 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %18 = linalg.generic {indexing_maps = [#map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%16 : tensor<5x3xi32>) outs(%17 : tensor<5x3xf32>) {
    ^bb0(%arg1: i32, %arg2: f32):  // no predecessors
      %23 = arith.sitofp %arg1 : i32 to f32
      linalg.yield %23 : f32
    } -> tensor<5x3xf32>
    %19 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %hoisted_1 = util.global.load @hoisted_1 : tensor<f32>
    %20 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%hoisted_1 : tensor<f32>) outs(%19 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      linalg.yield %arg1 : f32
    } -> tensor<5x3xf32>
    %21 = linalg.init_tensor [5, 3] : tensor<5x3xf32>
    %22 = linalg.generic {indexing_maps = [#map1, #map1, #map1], iterator_types = ["parallel", "parallel"]} ins(%18, %20 : tensor<5x3xf32>, tensor<5x3xf32>) outs(%21 : tensor<5x3xf32>) {
    ^bb0(%arg1: f32, %arg2: f32, %arg3: f32):  // no predecessors
      %23 = arith.divf %arg1, %arg2 : f32
      linalg.yield %23 : f32
    } -> tensor<5x3xf32>
    return %22 : tensor<5x3xf32>
  }
  util.global private @hoisted = dense<0.014932218> : tensor<f32>
  util.global private @hoisted_0 = dense<[[0, 7, 15], [22, 30, 37], [45, 52, 60], [67, 75, 82], [90, 97, 105], [112, 120, 127]]> : tensor<6x3xi8>
  util.global private @hoisted_1 = dense<0.074661091> : tensor<f32>
}