// RUN: mlir-opt --transform-interpreter --cse -split-input-file %s | FileCheck %s

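// Tile a matmul with dynamic shapes using tile sizes [10, 20]: the two
// parallel dimensions are turned into an scf.for nest that carries the result
// tensor as an iter_arg and updates it via extract_slice/insert_slice.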
func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %matmul tile_sizes [10, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//  CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
//  CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
//      CHECK-LABEL: func.func @simple_matmul(
// CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
//  CHECK-DAG:   %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
//  CHECK-DAG:   %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
//  CHECK-DAG:   %[[C10:.+]] = arith.constant 10 : index
//  CHECK-DAG:   %[[C20:.+]] = arith.constant 20 : index
//      CHECK:   %[[OUTER:[a-zA-Z0-9]+]] = scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[M]] step %[[C10]]
// CHECK-SAME:       iter_args(%[[INIT0:.+]] = %[[ARG2]])
//      CHECK:     %[[INNER:[a-zA-Z0-9]+]] = scf.for %[[IV1:[a-zA-Z0-9]+]] = %[[C0]] to %[[N]] step %[[C20]]
// CHECK-SAME:         iter_args(%[[INIT1:.+]] = %[[INIT0]])
//  CHECK-DAG:       %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[M]]]
//      CHECK:       %[[TS_X:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[N]]]
//  CHECK-DAG:       %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME:           [%[[IV0]], 0] [%[[TS_Y]], %[[K]]] [1, 1]
//  CHECK-DAG:       %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME:           [0, %[[IV1]]] [%[[K]], %[[TS_X]]] [1, 1]
//  CHECK-DAG:       %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT1]]
// CHECK-SAME:           [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
//      CHECK:       %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME:           ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME:           outs(%[[INIT_TILE]] :
//      CHECK:       %[[UPDATE:.+]] = tensor.insert_slice %[[GEMM_TILE]] into %[[INIT1]]
// CHECK-SAME:           [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
//      CHECK:       scf.yield %[[UPDATE]]
//      CHECK:     scf.yield %[[INNER]]
//      CHECK:   return %[[OUTER]]

// -----

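// Tile the same matmul on memrefs with tile sizes [10, 20, 30]: all three
// loops (including the reduction) are generated, operands are accessed through
// memref.subview, and no iter_args are needed since there are no tensor results.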
func.func @simple_matmul_memref(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>,
    %arg2 : memref<?x?xf32>) {
  linalg.matmul ins(%arg0, %arg1 : memref<?x?xf32>, memref<?x?xf32>)
      outs(%arg2 : memref<?x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c, %d = transform.structured.tile_using_for %matmul tile_sizes [10, 20, 30]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//  CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
//  CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
//  CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 30)>
//      CHECK-LABEL: func.func @simple_matmul_memref(
// CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: memref<?x?xf32>
// CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: memref<?x?xf32>
// CHECK-SAME:     %[[ARG2:[a-zA-Z0-9]+]]: memref<?x?xf32>
//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[M:.+]] = memref.dim %[[ARG0]], %[[C0]]
//  CHECK-DAG:   %[[K:.+]] = memref.dim %[[ARG0]], %[[C1]]
//  CHECK-DAG:   %[[N:.+]] = memref.dim %[[ARG1]], %[[C1]]
//  CHECK-DAG:   %[[C10:.+]] = arith.constant 10 : index
//  CHECK-DAG:   %[[C20:.+]] = arith.constant 20 : index
//  CHECK-DAG:   %[[C30:.+]] = arith.constant 30 : index
//      CHECK:   scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[M]] step %[[C10]]
//      CHECK:     scf.for %[[IV1:[a-zA-Z0-9]+]] = %[[C0]] to %[[N]] step %[[C20]]
//      CHECK:       scf.for %[[IV2:[a-zA-Z0-9]+]] = %[[C0]] to %[[K]] step %[[C30]]
//  CHECK-DAG:         %[[TS_M:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[M]]]
//  CHECK-DAG:         %[[TS_N:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[N]]]
//  CHECK-DAG:         %[[TS_K:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[K]]]
//  CHECK-DAG:         %[[LHS_TILE:.+]] = memref.subview %[[ARG0]]
// CHECK-SAME:             [%[[IV0]], %[[IV2]]] [%[[TS_M]], %[[TS_K]]] [1, 1]
//  CHECK-DAG:         %[[RHS_TILE:.+]] = memref.subview %[[ARG1]]
// CHECK-SAME:             [%[[IV2]], %[[IV1]]] [%[[TS_K]], %[[TS_N]]] [1, 1]
//  CHECK-DAG:         %[[OUT_TILE:.+]] = memref.subview %[[ARG2]]
// CHECK-SAME:             [%[[IV0]], %[[IV1]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
//      CHECK:         linalg.matmul
// CHECK-SAME:             ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME:             outs(%[[OUT_TILE]] :

// -----

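// Tile a multi-result linalg.generic with tile sizes [10, 0, 20]: the second
// dimension stays untiled, and both results are threaded through the loop nest
// as iter_args and updated with insert_slice.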
#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
  %init0 = tensor.empty() : tensor<128x300x200xf32>
  %init1 = tensor.empty() : tensor<300x128x200xf32>
  %0:2 = linalg.generic {
      indexing_maps = [#map0, #map1, #map2],
      iterator_types = ["parallel", "parallel", "parallel"]}
      ins(%arg0 : tensor<128x200x300xf32>)
      outs(%init0, %init1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32):
      linalg.yield %b0, %b0 : f32, f32
    } -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>)
  return %0#0, %0#1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %generic tile_sizes [10, 0, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//   CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0) -> (-d0 + 128, 10)>
// CHECK-LABEL: func.func @multi_result(
//  CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<128x200x300xf32>)
//   CHECK-DAG:   %[[INIT0:.+]] = tensor.empty()
//   CHECK-DAG:   %[[INIT1:.+]] = tensor.empty()
//   CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C128:.+]] = arith.constant 128 : index
//   CHECK-DAG:   %[[C300:.+]] = arith.constant 300 : index
//   CHECK-DAG:   %[[C10:.+]] = arith.constant 10 : index
//   CHECK-DAG:   %[[C20:.+]] = arith.constant 20 : index
//       CHECK:   %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[C128]] step %[[C10]]
//  CHECK-SAME:       iter_args(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
//       CHECK:     %[[INNER:[a-zA-Z0-9]+]]:2 = scf.for %[[IV1:[a-zA-Z0-9]+]] = %[[C0]] to %[[C300]] step %[[C20]]
//  CHECK-SAME:         iter_args(%[[ARG3:[a-zA-Z0-9]+]] = %[[ARG1]], %[[ARG4:[a-zA-Z0-9]+]] = %[[ARG2]])
//   CHECK-DAG:       %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])
//   CHECK-DAG:       %[[ARG_TILE:.+]] = tensor.extract_slice %[[ARG0]]
//  CHECK-SAME:           [%[[IV0]], 0, %[[IV1]]] [%[[TS_Y]], 200, 20] [1, 1, 1]
//   CHECK-DAG:       %[[INIT0_TILE:.+]] = tensor.extract_slice %[[ARG3]]
//  CHECK-SAME:           [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
//   CHECK-DAG:       %[[INIT1_TILE:.+]] = tensor.extract_slice %[[ARG4]]
//  CHECK-SAME:           [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
//       CHECK:       %[[RESULT_TILE:.+]]:2 = linalg.generic
//  CHECK-SAME:           ins(%[[ARG_TILE]] :
//  CHECK-SAME:           outs(%[[INIT0_TILE]], %[[INIT1_TILE]] :
//       CHECK:       %[[UPDATE0:.+]] = tensor.insert_slice %[[RESULT_TILE]]#0 into %[[ARG3]]
//  CHECK-SAME:           [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
//       CHECK:       %[[UPDATE1:.+]] = tensor.insert_slice %[[RESULT_TILE]]#1 into %[[ARG4]]
//  CHECK-SAME:           [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
//       CHECK:       scf.yield %[[UPDATE0]], %[[UPDATE1]]
//       CHECK:     scf.yield %[[INNER]]#0, %[[INNER]]#1
//       CHECK:   return %[[OUTER]]#0, %[[OUTER]]#1

// -----

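// Tile the kh, kw and input-channel dimensions of a strided convolution with
// tile sizes [0, 0, 0, 0, 10, 20, 30]; the input slice sizes are computed with
// affine.apply maps that account for the convolution strides.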
func.func @conv2D(%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>,
    %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
  %0 = linalg.conv_2d_nhwc_hwcf {
      strides = dense<[2, 3]> : tensor<2xi64>,
      dilation = dense<[4, 5]> : tensor<2xi64>}
      ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
      outs(%arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
  return %0 : tensor<?x?x?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %conv = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c, %d = transform.structured.tile_using_for %conv tile_sizes [0, 0, 0, 0, 10, 20, 30]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//  CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
//  CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
//  CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 30)>
//  CHECK-DAG: #[[$MAP3:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 2 - 2)>
//  CHECK-DAG: #[[$MAP4:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 3 - 3)>
//      CHECK-LABEL: func.func @conv2D(
// CHECK-SAME:     %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME:     %[[FILTER:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME:     %[[INIT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[C2:.+]] = arith.constant 2 : index
//  CHECK-DAG:   %[[C3:.+]] = arith.constant 3 : index
//  CHECK-DAG:   %[[N:.+]] = tensor.dim %[[INPUT]], %[[C0]]
//  CHECK-DAG:   %[[C:.+]] = tensor.dim %[[INPUT]], %[[C3]]
//  CHECK-DAG:   %[[P:.+]] = tensor.dim %[[FILTER]], %[[C0]]
//  CHECK-DAG:   %[[Q:.+]] = tensor.dim %[[FILTER]], %[[C1]]
//  CHECK-DAG:   %[[F:.+]] = tensor.dim %[[FILTER]], %[[C3]]
//  CHECK-DAG:   %[[R:.+]] = tensor.dim %[[INIT]], %[[C1]]
//  CHECK-DAG:   %[[S:.+]] = tensor.dim %[[INIT]], %[[C2]]
//  CHECK-DAG:   %[[C10:.+]] = arith.constant 10 : index
//  CHECK-DAG:   %[[C20:.+]] = arith.constant 20 : index
//  CHECK-DAG:   %[[C30:.+]] = arith.constant 30 : index
//      CHECK:   scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[P]] step %[[C10]]
// CHECK-SAME:       iter_args(%[[INIT0:.+]] = %[[INIT]])
//      CHECK:     scf.for %[[IV1:[a-zA-Z0-9]+]] = %[[C0]] to %[[Q]] step %[[C20]]
// CHECK-SAME:         iter_args(%[[INIT1:.+]] = %[[INIT0]])
//      CHECK:       scf.for %[[IV2:[a-zA-Z0-9]+]] = %[[C0]] to %[[C]] step %[[C30]]
// CHECK-SAME:           iter_args(%[[INIT2:.+]] = %[[INIT1]])
//  CHECK-DAG:         %[[TS_P:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[P]]]
//  CHECK-DAG:         %[[TS_Q:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[Q]]]
//  CHECK-DAG:         %[[TS_C:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[C]]]
//  CHECK-DAG:         %[[TS_H:.+]] = affine.apply #[[$MAP3]](%[[TS_P]])[%[[R]]]
//  CHECK-DAG:         %[[TS_W:.+]] = affine.apply #[[$MAP4]](%[[TS_Q]])[%[[S]]]
//  CHECK-DAG:         %[[INPUT_TILE:.+]] = tensor.extract_slice %[[INPUT]]
// CHECK-SAME:             [0, %[[IV0]], %[[IV1]], %[[IV2]]] [%[[N]], %[[TS_H]], %[[TS_W]], %[[TS_C]]]
//  CHECK-DAG:         %[[FILTER_TILE:.+]] = tensor.extract_slice %[[FILTER]]
// CHECK-SAME:             [%[[IV0]], %[[IV1]], %[[IV2]], 0] [%[[TS_P]], %[[TS_Q]], %[[TS_C]], %[[F]]]
//  CHECK-DAG:         %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT2]]
// CHECK-SAME:             [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]
//      CHECK:         %[[CONV_TILE:.+]] = linalg.conv_2d_nhwc_hwcf
// CHECK-SAME:             dilation = dense<[4, 5]> : tensor<2xi64>, strides = dense<[2, 3]> : tensor<2xi64>
// CHECK-SAME:             ins(%[[INPUT_TILE]], %[[FILTER_TILE]] :
// CHECK-SAME:             outs(%[[INIT_TILE]] :
//      CHECK:         tensor.insert_slice %[[CONV_TILE]] into %[[INIT2]]
// CHECK-SAME:             [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]

// -----

func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Check that we correctly amend "linalg.index" results.

  %0 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                     affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]}
    ins(%arg0: tensor<?x?xf32>)
    outs(%arg1: tensor<?x?xf32>) {
  ^bb0(%arg2: f32, %arg3: f32):
    %1 = linalg.index 0 : index
    %2 = linalg.index 1 : index
    %3 = arith.addi %1, %2 : index
    %4 = arith.index_cast %3 : index to i64
    %5 = arith.uitofp %4 : i64 to f32
    %6 = arith.addf %5, %arg2 : f32
    linalg.yield %6 : f32
  } -> (tensor<?x?xf32>)
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %generic tile_sizes [10, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//       CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-LABEL: @indexed_semantics
//       CHECK:   scf.for %[[I0:.+]] = %{{.*}} to %{{.*}} step %{{.*}}
//       CHECK:     scf.for %[[I1:.+]] = %{{.*}} to %{{.*}} step %{{.*}}
//       CHECK:       %[[INDEX0:.+]] = linalg.index 0
//       CHECK:       %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I0]])[%[[INDEX0]]]
//       CHECK:       %[[INDEX1:.+]] = linalg.index 1
//       CHECK:       %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I1]])[%[[INDEX1]]]
//       CHECK:       arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]

// -----

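// Same matmul tiling as above, but with interchange = [1, 2, 0]: the generated
// loop nest iterates the N dimension outermost, then K, then M.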
func.func @interchange_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c, %d = transform.structured.tile_using_for %matmul tile_sizes [10, 20, 30] interchange = [1, 2, 0]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
//  CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
//  CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 30)>
//  CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
//      CHECK-LABEL: func.func @interchange_matmul(
// CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
//  CHECK-DAG:   %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
//  CHECK-DAG:   %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
//  CHECK-DAG:   %[[C10:.+]] = arith.constant 10 : index
//  CHECK-DAG:   %[[C20:.+]] = arith.constant 20 : index
//  CHECK-DAG:   %[[C30:.+]] = arith.constant 30 : index
//      CHECK:   %[[OUTER:[a-zA-Z0-9]+]] = scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[N]] step %[[C20]]
// CHECK-SAME:       iter_args(%[[INIT0:.+]] = %[[ARG2]])
//      CHECK:     %[[INNER1:[a-zA-Z0-9]+]] = scf.for %[[IV1:[a-zA-Z0-9]+]] = %[[C0]] to %[[K]] step %[[C30]]
// CHECK-SAME:         iter_args(%[[INIT1:.+]] = %[[INIT0]])
//      CHECK:       %[[INNER2:[a-zA-Z0-9]+]] = scf.for %[[IV2:[a-zA-Z0-9]+]] = %[[C0]] to %[[M]] step %[[C10]]
// CHECK-SAME:           iter_args(%[[INIT2:.+]] = %[[INIT1]])
//  CHECK-DAG:         %[[TS_N:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[N]]]
//  CHECK-DAG:         %[[TS_K:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[K]]]
//  CHECK-DAG:         %[[TS_M:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[M]]]
//  CHECK-DAG:         %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME:             [%[[IV2]], %[[IV1]]] [%[[TS_M]], %[[TS_K]]] [1, 1]
//  CHECK-DAG:         %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME:             [%[[IV1]], %[[IV0]]] [%[[TS_K]], %[[TS_N]]] [1, 1]
//  CHECK-DAG:         %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT2]]
// CHECK-SAME:             [%[[IV2]], %[[IV0]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
//      CHECK:         %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME:             ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME:             outs(%[[INIT_TILE]] :
//      CHECK:         %[[UPDATE:.+]] = tensor.insert_slice %[[GEMM_TILE]] into %[[INIT2]]
// CHECK-SAME:             [%[[IV2]], %[[IV0]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
//      CHECK:         scf.yield %[[UPDATE]]
//      CHECK:       scf.yield %[[INNER2]]
//      CHECK:     scf.yield %[[INNER1]]
//      CHECK:   return %[[OUTER]]

// -----

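// Tile a linalg.copy on memrefs with tile sizes [10, 20] into a two-deep
// scf.for nest operating on memref.subview slices.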
func.func @linalg_copy_matmul(%a: memref<?x?xf32>, %b: memref<?x?xf32>) {
  linalg.copy ins(%a : memref<?x?xf32>) outs(%b : memref<?x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %copy = transform.structured.match ops{["linalg.copy"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %copy tile_sizes [10, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @linalg_copy_matmul(
//       CHECK:   scf.for
//       CHECK:     scf.for
//       CHECK:       memref.subview
//       CHECK:       memref.subview
//       CHECK:       linalg.copy

// -----

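// Tiling a zero-rank (scalar) linalg.generic with an empty tile-size list is a
// no-op: no scf.for loops are generated.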
func.func @check_scalar_operation(%arg0 : tensor<f32>) -> tensor<f32> {
  %init = tensor.empty() : tensor<f32>
  %0 = linalg.generic {
      indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>],
      iterator_types = []}
      ins(%arg0 : tensor<f32>) outs(%init : tensor<f32>){
    ^bb0(%b0 : f32, %b1 : f32):
      %1 = arith.mulf %b0, %b0 : f32
      linalg.yield %1 : f32
  } -> tensor<f32>
  return %0 : tensor<f32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a = transform.structured.tile_using_for %generic tile_sizes []
      : (!transform.any_op) -> (!transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @check_scalar_operation
//   CHECK-NOT:   scf.for
//       CHECK:   linalg.generic

// -----

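// Same as above, but with memref operands: again no loops are generated.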
func.func @check_scalar_memref_operation(%arg0 : memref<f32>, %arg1 : memref<f32>){
  linalg.generic {
      indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>],
      iterator_types = []}
      ins(%arg0 : memref<f32>) outs(%arg1 : memref<f32>){
    ^bb0(%b0 : f32, %b1 : f32):
      %1 = arith.mulf %b0, %b0 : f32
      linalg.yield %1 : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a = transform.structured.tile_using_for %generic tile_sizes []
      : (!transform.any_op) -> (!transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @check_scalar_memref_operation
//   CHECK-NOT:   scf.for
//       CHECK:   linalg.generic