// RUN: mlir-opt --transform-interpreter %s -split-input-file -verify-diagnostics | FileCheck %s

// Test One-Shot Bufferize.

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// CHECK-LABEL: func @test_function(
//  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
  %c0 = arith.constant 0 : index

  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
  // CHECK: memref.copy %[[A_memref]], %[[alloc]]
  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>

  // CHECK: return %[[res_tensor]]
  return %0 : tensor<?xf32>
}

// -----

// Emit linalg.copy instead of memref.copy.

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.bufferization.one_shot_bufferize %0 {memcpy_op = "linalg.copy"} : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// CHECK-LABEL: func @test_function(
//  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
//   CHECK-NOT:   memref.copy
func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
  %c0 = arith.constant 0 : index

  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
  // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]]
  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>

  // CHECK: return %[[res_tensor]]
  return %0 : tensor<?xf32>
}

// -----

// Test analysis of One-Shot Bufferize only.

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.bufferization.one_shot_bufferize %0
        {test_analysis_only = true} : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// CHECK-LABEL: func @test_function_analysis(
//  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
  %c0 = arith.constant 0 : index
  // CHECK: vector.transfer_write
  // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "none"]}
  // CHECK-SAME: tensor<?xf32>
  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Test One-Shot Bufferize transform failure with an unknown op. This would be
// allowed with `allow_unknown_ops`.

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    // expected-error @+1 {{bufferization failed}}
    %1 = transform.bufferization.one_shot_bufferize %0 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
  // expected-error @+1 {{op was not bufferized}}
  %0 = "test.dummy_op"() : () -> (tensor<?xf32>)
  return %0 : tensor<?xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
    // %arg1 is the module
    %0 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

module {
  // CHECK-LABEL: func @test_function(
  //  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
  func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
    %c0 = arith.constant 0 : index

    // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
    // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
    // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
    // CHECK: memref.copy %[[A_memref]], %[[alloc]]
    // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
    // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>

    // CHECK: return %[[res_tensor]]
    return %0 : tensor<?xf32>
  }
}

// -----

// Test we use identity layout at function boundaries.

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.consumed}) {
    %0 = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap} %arg1
      { bufferize_function_boundaries = true } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// CHECK: func.func @matmul(
// CHECK-SAME:  %[[A:.*]]: memref<12x9xf32>,
// CHECK-SAME:  %[[B:.*]]: memref<9x6xf32>,
// CHECK-SAME:  %[[C:.*]]: memref<12x6xf32>) -> memref<12x6xf32> {
func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32>) -> tensor<12x6xf32> {
  // CHECK: linalg.matmul ins(%[[A]], %[[B]] : memref<12x9xf32>, memref<9x6xf32>) outs(%[[C]] : memref<12x6xf32>)
  %D = linalg.matmul ins(%A, %B: tensor<12x9xf32>, tensor<9x6xf32>) outs(%C: tensor<12x6xf32>) -> tensor<12x6xf32>
  // CHECK: return %[[C]] : memref<12x6xf32>
  return %D : tensor<12x6xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.cast %0 : !transform.any_op to !transform.op<"tensor.empty">
    transform.bufferization.empty_tensor_to_alloc_tensor %1 : (!transform.op<"tensor.empty">) -> !transform.op<"bufferization.alloc_tensor">
    transform.yield
  }
}

// Expect `bufferization.empty_tensor_to_alloc_tensor` to replace the tensor.empty.
func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
  // CHECK: bufferization.alloc_tensor
  %0 = tensor.empty() : tensor<2x2xf32>
  return %0 : tensor<2x2xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.bufferization.eliminate_empty_tensors %0 : !transform.any_op
    transform.yield
  }
}

// CHECK-LABEL: func @empty_tensor_elimination(
//       CHECK:   tensor.extract_slice
//       CHECK:   linalg.fill
//       CHECK:   tensor.insert_slice
func.func @empty_tensor_elimination(
    %t: tensor<10xf32>, %f: f32) -> tensor<10xf32> {
  %0 = tensor.empty() : tensor<5xf32>
  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<5xf32>) -> tensor<5xf32>
  %2 = tensor.insert_slice %1 into %t [1][5][1]
      : tensor<5xf32> into tensor<10xf32>
  return %2 : tensor<10xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.bufferization.buffer_loop_hoisting %0 : !transform.any_op
    transform.yield
  }
}

// CHECK-LABEL: func @buffer_loop_hoisting(
//       CHECK:   memref.alloca
//       CHECK:   scf.for
//       CHECK:     memref.store
func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %pos: index) {
  scf.for %iv = %lb to %ub step %step {
    %0 = memref.alloca() : memref<5xf32>
    memref.store %f, %0[%pos] : memref<5xf32>
  }
  return
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %alloc_tensor = transform.structured.match ops{["bufferization.alloc_tensor"]} in %arg1
      : (!transform.any_op) -> !transform.op<"bufferization.alloc_tensor">
    %2, %new = transform.structured.bufferize_to_allocation %alloc_tensor
      {alloc_op = "memref.alloca"}
        : !transform.op<"bufferization.alloc_tensor">
    transform.yield
  }
}

// Expect `bufferization.bufferize_to_allocation` to create an alloc.
//  CHECK-LABEL: func.func @empty_to_tensor_alloc()
func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
  // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
  // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
  // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
  %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
  return %0 : tensor<2x2xf32>
}