// RUN: mlir-opt %s -allow-unregistered-dialect \
// RUN:     -transform-interpreter -canonicalize \
// RUN:     -split-input-file -verify-diagnostics | FileCheck %s

5// CHECK: #[[$map:.*]] = affine_map<()[s0] -> (s0 - 1)>
6// CHECK-LABEL: func @make_alloca_loop_independent(
7//  CHECK-SAME:     %[[lb:.*]]: index, %[[ub:.*]]: index, %[[step:.*]]: index)
8func.func @make_alloca_loop_independent(%lb: index, %ub: index, %step: index) {
9  %cst = arith.constant 5.5 : f32
10  %c0 = arith.constant 0 : index
11  // CHECK: scf.for %[[iv:.*]] = %[[lb]] to %[[ub]]
12  scf.for %i = %lb to %ub step %step {
13    // CHECK: %[[sz:.*]] = affine.apply #[[$map]]()[%[[ub]]]
14    // CHECK: %[[alloca:.*]] = memref.alloca(%[[sz]])
15    // CHECK: %[[subview:.*]] = memref.subview %[[alloca]][0] [%[[iv]]] [1] : memref<?xf32> to memref<?xf32, strided<[1]>>
16    // CHECK: %[[cast:.*]] = builtin.unrealized_conversion_cast %[[subview]] : memref<?xf32, strided<[1]>> to memref<?xf32>
17    %alloc = memref.alloca(%i) : memref<?xf32>
18
19    // memref.subview has special handling.
20    // CHECK: %[[subview2:.*]] = memref.subview %[[subview]][1] [5] [1] : memref<?xf32, strided<[1]>> to memref<5xf32, strided<[1], offset: 1>>
21    %view = memref.subview %alloc[1][5][1] : memref<?xf32> to memref<5xf32, strided<[1], offset: 1>>
22
23    // This op takes a memref but does not produce one. The new alloc is used
24    // directly.
25    // CHECK: "test.some_use"(%[[subview2]])
26    "test.some_use"(%view) : (memref<5xf32, strided<[1], offset: 1>>) -> ()
27
28    // This op produces a memref, so the new alloc cannot be used directly.
29    // It is wrapped in a unrealized_conversion_cast.
30    // CHECK: "test.another_use"(%[[cast]]) : (memref<?xf32>) -> memref<?xf32>
31    "test.another_use"(%alloc) : (memref<?xf32>) -> (memref<?xf32>)
32
33    // CHECK: memref.store %{{.*}}, %[[subview]]
34    memref.store %cst, %alloc[%c0] : memref<?xf32>
35  }
36  return
37}
38module attributes {transform.with_named_sequence} {
39  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
40    %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
41    %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
42    transform.yield
43  }
44}

// -----

48// CHECK: #[[$map:.*]] = affine_map<(d0) -> (-d0 + 128)>
49// CHECK-LABEL: func @make_alloca_loop_independent_static(
50func.func @make_alloca_loop_independent_static(%step: index) {
51  %cst = arith.constant 5.5 : f32
52  %c0 = arith.constant 0 : index
53  %ub = arith.constant 128 : index
54  // CHECK: scf.for %[[iv:.*]] =
55  scf.for %i = %c0 to %ub step %step {
56    // CHECK: %[[sz:.*]] = affine.apply #[[$map]](%[[iv]])
57    %sz = affine.apply affine_map<(d0)[s0] -> (-d0 + s0)>(%i)[%ub]
58
59    // CHECK: %[[alloca:.*]] = memref.alloca() : memref<128xf32>
60    // CHECK: %[[subview:.*]] = memref.subview %[[alloca]][0] [%[[sz]]] [1] : memref<128xf32> to memref<?xf32, strided<[1]>>
61    %alloc = memref.alloca(%sz) : memref<?xf32>
62
63    // CHECK: memref.store %{{.*}}, %[[subview]]
64    memref.store %cst, %alloc[%c0] : memref<?xf32>
65
66    // CHECK: vector.print %[[sz]]
67    %dim = memref.dim %alloc, %c0 : memref<?xf32>
68    vector.print %dim : index
69  }
70  return
71}
72module attributes {transform.with_named_sequence} {
73  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
74    %0 = transform.structured.match ops{["memref.alloca"]} in %arg1 : (!transform.any_op) -> !transform.any_op
75    %1 = transform.memref.make_loop_independent %0 {num_loops = 1} : (!transform.any_op) -> !transform.any_op
76    transform.yield
77  }
78}