benoitjacob@cloud:~/iree-build-linux$ iree/tools/iree-translate -iree-mlir-to-vm-bytecode-module -mlir-print-op-on-diagnostic=false --iree-hal-target-backends=dylib-llvm-aot a.mlir -o /usr/local/google/home/benoitjacob/iree-build-linux/iree/test/e2e/regression/e2e_matmul_mmt4d_f32_large_dylib-llvm-aot_dylib.vmfb -iree-llvm-embedded-linker-path="/usr/local/google/home/benoitjacob/iree-build-linux/third_party/llvm-project/llvm/bin/lld" -print-ir-after-all
// -----// IR Dump After mlir::iree_compiler::Shape::(anonymous namespace)::ExpandFunctionDynamicDimsPass //----- //
module  {
  func @matmul_123x456xf32_times_456x789xf32_into_123x789xf32(%arg0: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
    %c5 = arith.constant 5 : index
    %c0 = arith.constant 0 : index
    %cst = arith.constant 0.000000e+00 : f32
    %c2 = arith.constant 2 : index
    %0 = hal.tensor.cast %arg0 : !hal.buffer_view -> tensor<123x456xf32>
    %1 = linalg.pad_tensor %0 low[0, 0] high[%c5, %c0]  {
    ^bb0(%arg1: index, %arg2: index):  // no predecessors
      linalg.yield %cst : f32
    } : tensor<123x456xf32> to tensor<?x?xf32>
    %2 = linalg.tensor_expand_shape %1 [[0, 1], [2, 3]] : tensor<?x?xf32> into tensor<?x8x?x1xf32>
    %3 = tensor.dim %2, %c0 : tensor<?x8x?x1xf32>
    %4 = tensor.dim %2, %c2 : tensor<?x8x?x1xf32>
    %5 = hal.tensor.cast %2 : tensor<?x8x?x1xf32>{%3, %4} -> !hal.buffer_view
    return %5 : !hal.buffer_view
  }
}
/usr/local/google/home/benoitjacob/iree-build-linux/iree/test/e2e/regression/e2e_matmul_mmt4d_f32_large_dylib-llvm-aot_dylib_module.opt.mlir:10:10: error: 'linalg.tensor_expand_shape' op expected dimension 0 of collapsed type to be dynamic since one or more of the corresponding dimensions in the expanded type is dynamic
    %3 = linalg.tensor_expand_shape %0 [[0, 1], [2, 3]] : tensor<?x?xf32> into tensor<?x8x?x1xf32>
         ^
/usr/local/google/home/benoitjacob/iree-build-linux/iree/test/e2e/regression/e2e_matmul_mmt4d_f32_large_dylib-llvm-aot_dylib_module.opt.mlir:2:3: note: called from
  func @matmul_123x456xf32_times_456x789xf32_into_123x789xf32(%arg0: tensor<123x456xf32>) -> tensor<?x8x?x1xf32> {
  ^
// -----// IR Dump After PadTensorToSubTensorInsert Failed //----- //
"builtin.module"() ( {
  "builtin.func"() ( {
  ^bb0(%arg0: !hal.buffer_view):  // no predecessors
    %0 = "arith.constant"() {value = 0 : index} : () -> index
    %1 = "arith.constant"() {value = 0.000000e+00 : f32} : () -> f32
    %2 = "arith.constant"() {value = 2 : index} : () -> index
    %3 = "arith.constant"() {value = 123 : index} : () -> index
    %4 = "arith.constant"() {value = 456 : index} : () -> index
    %5 = "hal.tensor.cast"(%arg0) {operand_segment_sizes = dense<[1, 0, 0]> : vector<3xi32>} : (!hal.buffer_view) -> tensor<123x456xf32>
    %6 = "linalg.init_tensor"() {static_sizes = [128, 456]} : () -> tensor<128x456xf32>
    %7 = "linalg.fill"(%1, %6) ( {
    ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
      "linalg.yield"(%arg1) : (f32) -> ()
    }) : (f32, tensor<128x456xf32>) -> tensor<128x456xf32>
    %8 = "tensor.insert_slice"(%5, %7, %3, %4) {operand_segment_sizes = dense<[1, 1, 0, 2, 0]> : vector<5xi32>, static_offsets = [0, 0], static_sizes = [-1, -1], static_strides = [1, 1]} : (tensor<123x456xf32>, tensor<128x456xf32>, index, index) -> tensor<128x456xf32>
    %9 = "linalg.tensor_expand_shape"(%8) {reassociation = [[0, 1], [2, 3]]} : (tensor<128x456xf32>) -> tensor<?x8x?x1xf32>
    %10 = "tensor.dim"(%9, %0) : (tensor<?x8x?x1xf32>, index) -> index
    %11 = "tensor.dim"(%9, %2) : (tensor<?x8x?x1xf32>, index) -> index
    %12 = "hal.tensor.cast"(%9, %10, %11) {operand_segment_sizes = dense<[1, 2, 0]> : vector<3xi32>} : (tensor<?x8x?x1xf32>, index, index) -> !hal.buffer_view
    "std.return"(%12) : (!hal.buffer_view) -> ()
  }) {iree.abi.stub, sym_name = "matmul_123x456xf32_times_456x789xf32_into_123x789xf32", type = (!hal.buffer_view) -> !hal.buffer_view} : () -> ()
}) : () -> ()
Created November 10, 2021 03:00

Save bjacob/c9193489d4c10fce9ea71bdb18cbe267 to your computer and use it in GitHub Desktop.
    
      This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
      Learn more about bidirectional Unicode characters
    
  
  
    
module {
  func @matmul_123x456xf32_times_456x789xf32_into_123x789xf32(%arg0: tensor<123x456xf32>) -> tensor<?x8x?x1xf32> {
    %c5 = arith.constant 5 : index
    %c0 = arith.constant 0 : index
    %cst = arith.constant 0.000000e+00 : f32
    %0 = linalg.pad_tensor %arg0 low[0, 0] high[%c5, %c0] {
    ^bb0(%arg3: index, %arg4: index):  // no predecessors
      linalg.yield %cst : f32
    } : tensor<123x456xf32> to tensor<?x?xf32>
    %3 = linalg.tensor_expand_shape %0 [[0, 1], [2, 3]] : tensor<?x?xf32> into tensor<?x8x?x1xf32>
    return %3 : tensor<?x8x?x1xf32>
  }
}
  
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.