// xref: /llvm-project/mlir/test/Dialect/MemRef/multibuffer.mlir (revision a8aeb651cdae4e687500575108e12c89e540f59c)
// RUN: mlir-opt %s -allow-unregistered-dialect -test-multi-buffering=multiplier=5 -cse -split-input-file | FileCheck %s

// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (((d0 - 1) floordiv 3) mod 5)>

// %0 is allocated outside the scf.for loop and fully overwritten by the
// memref.copy in every iteration, so with multiplier=5 the pass must expand
// the alloc to 5 copies, index into it with ((iv - 1) floordiv 3) mod 5
// (start 1, step 3), and keep the user attribute on the new alloc.
// CHECK-LABEL: func @multi_buffer
func.func @multi_buffer(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() {someAttribute} : memref<5x4x128xf32>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
  %0 = memref.alloc() {someAttribute} : memref<4x128xf32>
  %c1024 = arith.constant 1024 : index
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
// CHECK: scf.for %[[IV:.*]] = %[[C1]]
  scf.for %arg2 = %c1 to %c1024 step %c3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]])
// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
    "some_use"(%0) : (memref<4x128xf32>) -> ()
// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
    "some_use"(%0) : (memref<4x128xf32>) -> ()
  }
  return
}

// -----

// Multi-buffering must also handle an affine.for loop with constant bounds
// (1 to 1024 step 3): the alloc is expanded to 5 copies and indexed with the
// same ((iv - 1) floordiv 3) mod 5 expression as for the scf.for case.
// CHECK-LABEL: func @multi_buffer_affine
func.func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
  %0 = memref.alloc() : memref<4x128xf32>
  %c1024 = arith.constant 1024 : index
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
// CHECK: affine.for %[[IV:.*]] = 1
  affine.for %arg2 = 1 to 1024 step 3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]])
// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
    "some_use"(%0) : (memref<4x128xf32>) -> ()
// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
    "some_use"(%0) : (memref<4x128xf32>) -> ()
  }
  return
}

// -----

// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (((d0 - 1) floordiv 3) mod 5)>

// A use that is itself a memref.subview of the multi-buffered alloc (%s)
// must be rewritten on top of the per-iteration subview of the new 5-way
// buffer, producing a strided result type with a dynamic offset.
// CHECK-LABEL: func @multi_buffer_subview_use
func.func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
  %0 = memref.alloc() : memref<4x128xf32>
  %c1024 = arith.constant 1024 : index
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
// CHECK: scf.for %[[IV:.*]] = %[[C1]]
  scf.for %arg2 = %c1 to %c1024 step %c3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]])
// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
// CHECK: %[[SV1:.*]] = memref.subview %[[SV]][0, 1] [4, 127] [1, 1] : memref<4x128xf32, strided<[128, 1], offset: ?>> to memref<4x127xf32, strided<[128, 1], offset: ?>>
    %s = memref.subview %0[0, 1] [4, 127] [1, 1] :
      memref<4x128xf32> to memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>
// CHECK: "some_use"(%[[SV1]]) : (memref<4x127xf32, strided<[128, 1], offset: ?>>) -> ()
    "some_use"(%s) : (memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>) -> ()
// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided<[128, 1], offset: ?>>) -> ()
    "some_use"(%0) : (memref<4x128xf32>) -> ()
  }
  return
}

// -----

// Negative case: "blocking_use" reads %0 at the top of each iteration,
// before the memref.copy overwrites it, so the buffer must NOT be
// multi-buffered — the expectations below verify that no 5x4x128 alloc is
// created and the original 4x128 alloc is left in place.
// CHECK-LABEL: func @multi_buffer_negative
func.func @multi_buffer_negative(%a: memref<1024x1024xf32>) {
// CHECK-NOT: %{{.*}} = memref.alloc() : memref<5x4x128xf32>
//     CHECK: %{{.*}} = memref.alloc() : memref<4x128xf32>
  %0 = memref.alloc() : memref<4x128xf32>
  %c1024 = arith.constant 1024 : index
  %c0 = arith.constant 0 : index
  %c3 = arith.constant 3 : index
  scf.for %arg2 = %c0 to %c1024 step %c3 {
    "blocking_use"(%0) : (memref<4x128xf32>) -> ()
    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
    "some_use"(%0) : (memref<4x128xf32>) -> ()
  }
  return
}

