// RUN: mlir-opt %s \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
// RUN: -buffer-deallocation-pipeline -convert-bufferization-to-memref \
// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata \
// RUN: -lower-affine -convert-arith-to-llvm --finalize-memref-to-llvm \
// RUN: -convert-func-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-runner -e main -entry-point-result=void \
// RUN:   -shared-libs=%mlir_runner_utils \
// RUN: | FileCheck %s

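// Integration test: bufferize a tensor.insert_slice with one-shot
// bufferization, lower it through loops to LLVM, execute it with mlir-runner,
// and verify the printed memref contents with FileCheck.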
func.func @main() {
  %const = arith.constant dense<10.0> : tensor<2xf32>
  %insert_val = arith.constant dense<20.0> : tensor<1xf32>
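  // Insert the 1-element tensor into %const at offset 0 (size 1, stride 1),
  // overwriting the first element so the result is [20.0, 10.0].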
  %inserted = tensor.insert_slice %insert_val into %const[0][1][1] : tensor<1xf32> into tensor<2xf32>

  %unranked = tensor.cast %inserted : tensor<2xf32> to tensor<*xf32>
  call @printMemrefF32(%unranked) : (tensor<*xf32>) -> ()

  //      CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data =
  // CHECK-NEXT: [20, 10]

  return
}

func.func private @printMemrefF32(%ptr : tensor<*xf32>)