// RUN: mlir-opt %s \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
// RUN: -buffer-deallocation-pipeline -convert-bufferization-to-memref -convert-linalg-to-loops \
// RUN: -convert-arith-to-llvm -convert-scf-to-cf -convert-cf-to-llvm --finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-runner -e main -entry-point-result=void \
// RUN:   -shared-libs=%mlir_runner_utils \
// RUN: | FileCheck %s

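// The pipeline above first bufferizes the tensor program to memrefs
// (one-shot bufferization across function boundaries), inserts buffer
// deallocations, and then lowers loops, arithmetic, and memref operations
// to the LLVM dialect. mlir-runner JIT-compiles and runs @main, resolving
// runtime symbols such as printMemrefF32 from the library passed via
// -shared-libs.
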
func.func @foo() -> tensor<4xf32> {
  %0 = arith.constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
  return %0 : tensor<4xf32>
}

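// A sketch of what @foo bufferizes to under
// -one-shot-bufferize="bufferize-function-boundaries" (not the guaranteed
// output; the dense constant is typically hoisted into a module-level
// global):
//
//   memref.global "private" constant @__constant_4xf32 : memref<4xf32> =
//       dense<[1.0, 2.0, 3.0, 4.0]>
//   func.func @foo() -> memref<4xf32> {
//     %0 = memref.get_global @__constant_4xf32 : memref<4xf32>
//     return %0 : memref<4xf32>
//   }
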
func.func @main() {
  %0 = call @foo() : () -> tensor<4xf32>

  // Instead of relying on tensor_store, which introduces aliasing, we rely on
  // the conversion of printMemrefF32(tensor<*xf32>) to
  // printMemrefF32(memref<*xf32>).
  // Note that this is skipping a step, and we would need at least some
  // function attribute to declare that this conversion is valid (e.g., when
  // we statically know that things will play nicely at the C ABI boundary).
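  // After bufferization, the cast and call below become roughly the
  // following, where %buf stands for the bufferized result of @foo (a
  // sketch; exact SSA names and types may differ):
  //
  //   %m = memref.cast %buf : memref<4xf32> to memref<*xf32>
  //   call @printMemrefF32(%m) : (memref<*xf32>) -> ()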
  %unranked = tensor.cast %0 : tensor<4xf32> to tensor<*xf32>
  call @printMemrefF32(%unranked) : (tensor<*xf32>) -> ()

  //      CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [4] strides = [1] data =
  // CHECK-NEXT: [1, 2, 3, 4]

  return
}

// This gets converted to a function operating on memref<*xf32>.
// Note that this is skipping a step, and we would need at least some function
// attribute to declare that this conversion is valid (e.g., when we statically
// know that things will play nicely at the C ABI boundary).
func.func private @printMemrefF32(%ptr : tensor<*xf32>)
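
// After bufferization, the declaration above becomes, roughly:
//
//   func.func private @printMemrefF32(memref<*xf32>)
//
// The printMemrefF32 symbol itself is provided at runtime by the runner
// utils library linked in via -shared-libs=%mlir_runner_utils.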