1// RUN: mlir-opt %s -test-linalg-transform-patterns=test-linalg-to-vector-patterns \ 2// RUN: -one-shot-bufferize="bufferize-function-boundaries" \ 3// RUN: -buffer-deallocation-pipeline -convert-bufferization-to-memref \ 4// RUN: -convert-linalg-to-loops -convert-scf-to-cf -expand-strided-metadata \ 5// RUN: -lower-affine -convert-arith-to-llvm -finalize-memref-to-llvm -convert-func-to-llvm -convert-cf-to-llvm -reconcile-unrealized-casts | \ 6// RUN: mlir-runner -e main -entry-point-result=void \ 7// RUN: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \ 8// RUN: | FileCheck %s 9 10 11func.func @main() { 12 %const = arith.constant dense<[[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]]> : tensor<1x2x3xf32> 13 %dynamic = tensor.cast %const: tensor<1x2x3xf32> to tensor<1x?x3xf32> 14 %offset = arith.constant 2 : index 15 %cst = arith.constant 2.3 : f32 16 %c0 = arith.constant 0 : index 17 %out = tensor.pad %dynamic low[%c0, %offset, %c0] high[%c0, %c0, %offset] { 18 ^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index): 19 tensor.yield %cst : f32 20 } : tensor<1x?x3xf32> to tensor<1x?x?xf32> 21 %unranked = tensor.cast %out: tensor<1x?x?xf32> to tensor<*xf32> 22 call @printMemrefF32(%unranked) : (tensor<*xf32>) -> () 23 24 // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}} 25 // CHECK-SAME: rank = 3 offset = 0 sizes = [1, 4, 5] strides = [20, 5, 1] data = 26 // CHECK-NEXT{LITERAL}: [[[2.3, 2.3, 2.3, 2.3, 2.3], 27 // CHECK-NEXT: [2.3, 2.3, 2.3, 2.3, 2.3], 28 // CHECK-NEXT: [1, 2, 3, 2.3, 2.3], 29 // CHECK-NEXT: [2, 3, 4, 2.3, 2.3]]] 30 31 return 32} 33 34func.func private @printMemrefF32(%ptr : tensor<*xf32>) 35