// RUN: mlir-opt %s \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
// RUN: -buffer-deallocation-pipeline -convert-bufferization-to-memref \
// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata \
// RUN: -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm \
// RUN: -convert-func-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-runner -e main -entry-point-result=void \
// RUN:   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
// RUN: | FileCheck %s

func.func @main() {
  %const = arith.constant dense<10.0> : tensor<2xf32>
  %insert_val = arith.constant dense<20.0> : tensor<1xf32>

  // Both of these insert_slice ops insert into the same original tensor
  // value `%const`. This can easily cause bugs if at the memref level
  // we attempt to write in-place into the memref that %const has been
  // converted into.
  %inserted_at_position_0 = tensor.insert_slice %insert_val into %const[0][1][1] : tensor<1xf32> into tensor<2xf32>
  %inserted_at_position_1 = tensor.insert_slice %insert_val into %const[1][1][1] : tensor<1xf32> into tensor<2xf32>

  %unranked_at_position_0 = tensor.cast %inserted_at_position_0 : tensor<2xf32> to tensor<*xf32>
  call @printMemrefF32(%unranked_at_position_0) : (tensor<*xf32>) -> ()

  // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data =
  // CHECK-NEXT: [20, 10]

  %unranked_at_position_1 = tensor.cast %inserted_at_position_1 : tensor<2xf32> to tensor<*xf32>
  call @printMemrefF32(%unranked_at_position_1) : (tensor<*xf32>) -> ()

  // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data =
  // CHECK-NEXT: [10, 20]

  return
}

func.func private @printMemrefF32(%ptr : tensor<*xf32>)
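
// The comment in @main explains why an in-place write at the memref level
// would be a bug: %const is read again by the second insert_slice. As an
// illustration, below is a rough, hand-written sketch of the kind of IR
// one-shot-bufferize is expected to produce for the first insert_slice. It
// is kept in a comment so it has no effect on the test, and the global
// symbol names (e.g. @__constant_2xf32) are illustrative rather than taken
// from actual pass output. The key point is that the buffer backing %const
// is copied into a fresh allocation before the slice is written:
//
//   %0 = memref.get_global @__constant_2xf32 : memref<2xf32>
//   %1 = memref.get_global @__constant_1xf32 : memref<1xf32>
//   %alloc = memref.alloc() : memref<2xf32>
//   memref.copy %0, %alloc : memref<2xf32> to memref<2xf32>
//   %subview = memref.subview %alloc[0] [1] [1]
//       : memref<2xf32> to memref<1xf32, strided<[1]>>
//   memref.copy %1, %subview : memref<1xf32> to memref<1xf32, strided<[1]>>
//
// The second insert_slice gets its own alloc+copy in the same way, with the
// subview taken at offset 1 (type memref<1xf32, strided<[1], offset: 1>>),
// which is why the two printed results differ as the CHECK lines require.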