// RUN: mlir-opt %s \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
// RUN: -buffer-deallocation-pipeline -convert-bufferization-to-memref \
// RUN: -convert-scf-to-cf -expand-strided-metadata -lower-affine -convert-cf-to-llvm -convert-arith-to-llvm \
// RUN: -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \
// RUN: | FileCheck %s

// Collapse the two middle dimensions of a dynamically shaped tensor and print
// the result: the 2x2x3x2 constant collapses to a 2x6x2 memref.
func.func @main() {
  %const = arith.constant dense<[[[[-3.9058,0.9072],[-2.9470,-2.2055],[18.3946,8.2997]],[[3.4700,5.9006],[-17.2267,4.9777],[1.0450,-0.8201]]],[[[17.6996,-11.1763],[26.7775,-3.8823],[-4.2492,-5.8966]],[[2.1259,13.1794],[-10.7136,0.8428],[16.4233,9.4589]]]]> : tensor<2x2x3x2xf32>
  // Erase the static sizes of the three trailing dimensions so the callee
  // sees a shape that is only known at runtime.
  %dynamic = tensor.cast %const : tensor<2x2x3x2xf32> to tensor<2x?x?x?xf32>
  %collapsed = call @collapse_dynamic_shape(%dynamic) : (tensor<2x?x?x?xf32>) -> (tensor<2x?x?xf32>)
  // Cast to an unranked tensor for the runtime print utility.
  %unranked = tensor.cast %collapsed : tensor<2x?x?xf32> to tensor<*xf32>
  call @printMemrefF32(%unranked) : (tensor<*xf32>) -> ()
  // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 3 offset = 0 sizes = [2, 6, 2] strides = [12, 2, 1] data =
  // CHECK-NEXT{LITERAL}: [[[-3.9058, 0.9072],
  // CHECK-NEXT: [-2.947, -2.2055],
  // CHECK-NEXT: [18.3946, 8.2997],
  // CHECK-NEXT: [3.47, 5.9006],
  // CHECK-NEXT: [-17.2267, 4.9777],
  // CHECK-NEXT: [1.045, -0.8201]],
  // CHECK-NEXT{LITERAL}: [[17.6996, -11.1763],
  // CHECK-NEXT: [26.7775, -3.8823],
  // CHECK-NEXT: [-4.2492, -5.8966],
  // CHECK-NEXT: [2.1259, 13.1794],
  // CHECK-NEXT: [-10.7136, 0.8428],
  // CHECK-NEXT: [16.4233, 9.4589]]]
  return
}

func.func private @printMemrefF32(%ptr : tensor<*xf32>)

// The reassociation [[0], [1, 2], [3]] keeps dimensions 0 and 3 and merges
// dimensions 1 and 2 into a single dynamic result dimension.
func.func @collapse_dynamic_shape(%arg0 : tensor<2x?x?x?xf32>) -> tensor<2x?x?xf32> {
  %0 = tensor.collapse_shape %arg0 [[0], [1, 2], [3]] : tensor<2x?x?x?xf32> into tensor<2x?x?xf32>
  return %0 : tensor<2x?x?xf32>
}
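
// For reference: with one-shot bufferization, the tensor.collapse_shape above
// becomes a memref.collapse_shape with the same reassociation, roughly as
// sketched below (%buf stands for the bufferized operand; the exact buffer
// layouts are not checked by this test):
//
//   %0 = memref.collapse_shape %buf [[0], [1, 2], [3]]
//       : memref<2x?x?x?xf32> into memref<2x?x?xf32>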