// RUN: mlir-opt -transform-interpreter -split-input-file -verify-diagnostics -allow-unregistered-dialect %s | FileCheck %s
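// Pack a static 2-D row reduction along its reduction dimension with an inner
// tile of 4. Only the input is packed (with a padding value, since 7 is not a
// multiple of 4); the 1-D output is untouched, so no tensor.unpack is created.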
#map = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0)>
#reduction_2d_trait = {
  indexing_maps = [#map, #map1],
  iterator_types = ["parallel", "reduction"]
}

//    CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
//    CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2) -> (d0)>

//  CHECK-LABEL: @reduction_2d_static
//   CHECK-SAME:   %[[T0:.+]]: tensor<3x7xf16>,
//   CHECK-SAME:   %[[T1:.+]]: tensor<3xf16>
func.func @reduction_2d_static(%t0: tensor<3x7xf16>, %t1: tensor<3xf16>) -> tensor<3xf16> {
  //      CHECK:  %[[EMPTY:.*]] = tensor.empty() : tensor<3x2x4xf16>
  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
  // CHECK-SAME:   inner_dims_pos = [1] inner_tiles = [4] into %[[EMPTY]] : tensor<3x7xf16> -> tensor<3x2x4xf16>
  //  CHECK-NOT: tensor.pack
  //      CHECK: linalg.generic
  // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
  // CHECK-SAME:   iterator_types = ["parallel", "reduction", "reduction"]
  // CHECK-SAME:   ins(%{{.*}} : tensor<3x2x4xf16>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<3xf16>)
  %2 = linalg.generic #reduction_2d_trait ins(%t0 : tensor<3x7xf16>) outs(%t1 : tensor<3xf16>) {
  ^bb0(%in: f16, %out: f16):
    %3 = arith.addf %in, %out : f16
    linalg.yield %3 : f16
  } -> tensor<3xf16>

  //  CHECK-NOT: tensor.unpack
  return %2 : tensor<3xf16>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.pack %0 packed_sizes = [0, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}

// -----
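// Same reduction, but along columns: pack the reduction dimension (the rows of
// the 7x3 input) by 4, then use pack_transpose with outer_perm = [1, 0] to
// swap the outer dimensions of the produced tensor.pack.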

#map = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1)>
#col_reduction_2d_trait = {
  indexing_maps = [#map, #map1],
  iterator_types = ["reduction", "parallel"]
}

//    CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
//    CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2) -> (d1)>

//  CHECK-LABEL: @col_reduction_2d_static
//   CHECK-SAME:   %[[T0:.+]]: tensor<7x3xf16>,
//   CHECK-SAME:   %[[T1:.+]]: tensor<3xf16>
func.func @col_reduction_2d_static(%t0: tensor<7x3xf16>, %t1: tensor<3xf16>) -> tensor<3xf16> {
  //      CHECK:  %[[EMPTY:.*]] = tensor.empty() : tensor<3x2x4xf16>
  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
  // CHECK-SAME:   outer_dims_perm = [1, 0] inner_dims_pos = [0] inner_tiles = [4] into %[[EMPTY]] : tensor<7x3xf16> -> tensor<3x2x4xf16>
  //  CHECK-NOT: tensor.pack
  //      CHECK: linalg.generic
  // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
  // CHECK-SAME:   iterator_types = ["reduction", "parallel", "reduction"]
  // CHECK-SAME:   ins(%{{.*}} : tensor<3x2x4xf16>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<3xf16>)
  %2 = linalg.generic #col_reduction_2d_trait ins(%t0 : tensor<7x3xf16>) outs(%t1 : tensor<3xf16>) {
  ^bb0(%in: f16, %out: f16):
    %3 = arith.addf %in, %out : f16
    linalg.yield %3 : f16
  } -> tensor<3xf16>

  //  CHECK-NOT: tensor.unpack
  return %2 : tensor<3xf16>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.structured.pack %0 packed_sizes = [4, 0]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    %pack = transform.get_producer_of_operand %1[0]
      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
    %2, %pack_2, %empty_unpack_2 =
      transform.structured.pack_transpose %pack with_compute_op(%1)
      outer_perm = [1, 0]
       : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
    transform.yield
  }
}

// -----
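// Dynamic version of the first test: the reduction dimension is still packed
// by 4, so the packed input becomes tensor<?x?x4xf16> and its outer size is
// computed with an affine ceildiv 4.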

#map = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0)>
#reduction_2d_trait = {
  indexing_maps = [#map, #map1],
  iterator_types = ["parallel", "reduction"]
}

//    CHECK-DAG:     #[[$DIV4:.*]] = affine_map<()[s0] -> (s0 ceildiv 4)>
//    CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
//    CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2) -> (d0)>

//  CHECK-LABEL: @reduction_2d_dynamic
//   CHECK-SAME:   %[[T0:.+]]: tensor<?x?xf16>,
//   CHECK-SAME:   %[[T1:.+]]: tensor<?xf16>
func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> tensor<?xf16> {
  //  CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
  //  CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
  //  CHECK-DAG:     %[[D0:.*]] = tensor.dim %[[T0]], %[[C0]] : tensor<?x?xf16>
  //  CHECK-DAG:     %[[D1:.*]] = tensor.dim %[[T0]], %[[C1]] : tensor<?x?xf16>
  //      CHECK:   %[[D1B4:.*]] = affine.apply #[[$DIV4]]()[%[[D1]]]
  //      CHECK:  %[[EMPTY:.*]] = tensor.empty(%[[D0]], %[[D1B4]]) : tensor<?x?x4xf16>
  //      CHECK: %[[PACKED:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
  // CHECK-SAME:   inner_dims_pos = [1] inner_tiles = [4] into %[[EMPTY]] : tensor<?x?xf16> -> tensor<?x?x4xf16>
  //  CHECK-NOT: tensor.pack
  //      CHECK: linalg.generic
  // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
  // CHECK-SAME:   iterator_types = ["parallel", "reduction", "reduction"]
  // CHECK-SAME:   ins(%{{.*}} : tensor<?x?x4xf16>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<?xf16>)
  %2 = linalg.generic #reduction_2d_trait ins(%t0 : tensor<?x?xf16>) outs(%t1 : tensor<?xf16>) {
  ^bb0(%in: f16, %out: f16):
    %3 = arith.addf %in, %out : f16
    linalg.yield %3 : f16
  } -> tensor<?xf16>

  //  CHECK-NOT: tensor.unpack
  return %2 : tensor<?xf16>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.pack %0 packed_sizes = [0, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}


// -----
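// Pack both dimensions of the dynamic reduction: the parallel dimension by 3
// and the reduction dimension by 4. The output now gets packed as well, so a
// tensor.unpack is emitted after the packed linalg.generic.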

#map = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0)>
#reduction_2d_trait = {
  indexing_maps = [#map, #map1],
  iterator_types = ["parallel", "reduction"]
}

//    CHECK-DAG:     #[[$DIV3:.*]] = affine_map<()[s0] -> (s0 ceildiv 3)>
//    CHECK-DAG:     #[[$DIV4:.*]] = affine_map<()[s0] -> (s0 ceildiv 4)>
//    CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
//    CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d2)>

//  CHECK-LABEL: @reduction_2d_dynamic
//   CHECK-SAME:   %[[T0:.+]]: tensor<?x?xf16>,
//   CHECK-SAME:   %[[T1:.+]]: tensor<?xf16>
func.func @reduction_2d_dynamic(%t0: tensor<?x?xf16>, %t1: tensor<?xf16>) -> tensor<?xf16> {
  //      CHECK: %[[PACKED_0:.*]] = tensor.pack %[[T0]] padding_value(%{{.*}} : f16)
  // CHECK-SAME:   inner_dims_pos = [0, 1] inner_tiles = [3, 4] into %{{.*}} : tensor<?x?xf16> -> tensor<?x?x3x4xf16>
  //      CHECK: %[[PACKED_1:.*]] = tensor.pack %[[T1]] padding_value(%{{.*}} : f16)
  // CHECK-SAME:   inner_dims_pos = [0] inner_tiles = [3] into %{{.*}} : tensor<?xf16> -> tensor<?x3xf16>
  //  CHECK-NOT: tensor.pack
  //      CHECK: linalg.generic
  // CHECK-SAME:   indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]]]
  // CHECK-SAME:   iterator_types = ["parallel", "reduction", "parallel", "reduction"]
  // CHECK-SAME:   ins(%{{.*}} : tensor<?x?x3x4xf16>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<?x3xf16>)
  %2 = linalg.generic #reduction_2d_trait ins(%t0 : tensor<?x?xf16>) outs(%t1 : tensor<?xf16>) {
  ^bb0(%in: f16, %out: f16):
    %3 = arith.addf %in, %out : f16
    linalg.yield %3 : f16
  } -> tensor<?xf16>

  //      CHECK: tensor.unpack %{{.*}} inner_dims_pos = [0] inner_tiles = [3] into %{{.*}} : tensor<?x3xf16> -> tensor<?xf16>
  return %2 : tensor<?xf16>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.pack %0 packed_sizes = [3, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}

// -----
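// Pack a matmul with packed_sizes = [2, 3, 4] (M, N, K), then transpose the
// unpack of the result with outer_perm = [1, 0] and inner_perm = [1, 0]. The
// packing of C and the final unpack therefore carry both permutations.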

//                                                M   N   K   m   n   k       M   K   m   k
// CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
//                                                                            K   N   n   k
// CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d4, d5)>
//                                                                            N   M   n   m
// CHECK-DAG: #[[$PACKED_MAP_2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d0, d4, d3)>

// CHECK-LABEL: @matmul
//  CHECK-SAME:   %[[A:[0-9a-zA-Z]+]]: tensor<?x?xf32>,
//  CHECK-SAME:   %[[B:[0-9a-zA-Z]+]]: tensor<?x?xf32>,
//  CHECK-SAME:   %[[C:[0-9a-zA-Z]+]]: tensor<?x?xf32>
func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> tensor<?x?xf32> {

  //      CHECK: %[[PACK_A:.*]] = tensor.pack %{{.*}} inner_dims_pos = [0, 1] inner_tiles = [2, 4]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x2x4xf32>
  //      CHECK: %[[PACK_B:.*]] = tensor.pack %{{.*}} inner_dims_pos = [1, 0] inner_tiles = [3, 4]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x3x4xf32>
  //      CHECK: %[[PACK_C:.*]] = tensor.pack %{{.*}} outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [3, 2]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x3x2xf32>

  //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]}
  // CHECK-SAME:  ins(%{{.*}} : tensor<?x?x2x4xf32>, tensor<?x?x3x4xf32>)
  // CHECK-SAME: outs(%{{.*}} : tensor<?x?x3x2xf32>)
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>

  //      CHECK: tensor.unpack %{{.*}} outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [3, 2]
  // CHECK-SAME:   : tensor<?x?x3x2xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      //                                            M  N  K
      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)

      %unpack = transform.get_consumers_of_result %1[0]
        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
      %2, %pack_2, %unpack_2 =
        transform.structured.pack_transpose %unpack with_compute_op(%1)
        outer_perm = [1, 0] inner_perm = [1, 0]
        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
      transform.yield
  }
}

// -----
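// Pack a 2-D NCHW/FCHW convolution along its channel dimensions: the output
// channel F by 4 and the input channel C by 8. Input, filter, and init all get
// packed, and the result is unpacked back to NCHW.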

//                                                N   F   H   W   C  KH  KW   f   c
// CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d4, d2 + d5, d3 + d6, d8)>
// CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d1, d4, d5, d6, d7, d8)>
// CHECK-DAG: #[[$PACKED_MAP_2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2, d3, d7)>

// CHECK-LABEL: @conv_2d_nchw_fchw
//  CHECK-SAME:   %[[INPUT:.+]]: tensor<14x512x28x28xf32>,
//  CHECK-SAME:   %[[FILTER:.+]]: tensor<1024x512x1x1xf32>
//  CHECK-SAME:   %[[INIT:.+]]: tensor<14x1024x28x28xf32>
func.func @conv_2d_nchw_fchw(%i: tensor<14x512x28x28xf32>, %f: tensor<1024x512x1x1xf32>,
                             %o: tensor<14x1024x28x28xf32>) -> tensor<14x1024x28x28xf32> {

  //      CHECK: %[[PACK_INPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [1] inner_tiles = [8]
  // CHECK-SAME:   : tensor<14x512x28x28xf32> -> tensor<14x64x28x28x8xf32>
  //      CHECK: %[[PACK_FILTER:.*]] = tensor.pack %{{.*}} inner_dims_pos = [0, 1] inner_tiles = [4, 8]
  // CHECK-SAME:   : tensor<1024x512x1x1xf32> -> tensor<256x64x1x1x4x8xf32>
  //      CHECK: %[[PACK_OUTPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [1] inner_tiles = [4]
  // CHECK-SAME:   : tensor<14x1024x28x28xf32> -> tensor<14x256x28x28x4xf32>
  //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]}
  // CHECK-SAME:  ins(%{{.*}} : tensor<14x64x28x28x8xf32>, tensor<256x64x1x1x4x8xf32>)
  // CHECK-SAME: outs(%{{.*}} : tensor<14x256x28x28x4xf32>)
  %0 = linalg.conv_2d_nchw_fchw ins(%i, %f: tensor<14x512x28x28xf32>, tensor<1024x512x1x1xf32>)
                                outs(%o: tensor<14x1024x28x28xf32>) -> tensor<14x1024x28x28xf32>

  //      CHECK: tensor.unpack %{{.*}} inner_dims_pos = [1] inner_tiles = [4]
  // CHECK-SAME:   : tensor<14x256x28x28x4xf32> -> tensor<14x1024x28x28xf32>
  return %0: tensor<14x1024x28x28xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
    //                                            N  F  H  W  C KH KW
    %1 = transform.structured.pack %0 packed_sizes = [0, 4, 0, 0, 8, 0, 0]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}

// -----
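// Same packing idea for an NHWC/HWCF convolution with dynamic sizes: pack the
// output channel F by 4 and the input channel C by 6.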

//                                                N   H   W   F  KH  KW   C   f   c
// CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1 + d4, d2 + d5, d6, d8)>
// CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d4, d5, d6, d3, d7, d8)>
// CHECK-DAG: #[[$PACKED_MAP_2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2, d3, d7)>

// CHECK-LABEL: @conv_2d_nhwc_hwcf
//  CHECK-SAME:   %[[INPUT:.+]]: tensor<?x1x?x?xf32>,
//  CHECK-SAME:   %[[FILTER:.+]]: tensor<1x?x?x?xf32>
//  CHECK-SAME:   %[[INIT:.+]]: tensor<?x1x?x?xf32>
func.func @conv_2d_nhwc_hwcf(%input: tensor<?x1x?x?xf32>, %filter: tensor<1x?x?x?xf32>, %init: tensor<?x1x?x?xf32>) -> tensor<?x1x?x?xf32> {

  //      CHECK: %[[PACK_INPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [3] inner_tiles = [6]
  // CHECK-SAME:   : tensor<?x1x?x?xf32> -> tensor<?x1x?x?x6xf32>
  //      CHECK: %[[PACK_FILTER:.*]] = tensor.pack %{{.*}} inner_dims_pos = [3, 2] inner_tiles = [4, 6]
  // CHECK-SAME:   : tensor<1x?x?x?xf32> -> tensor<1x?x?x?x4x6xf32>
  //      CHECK: %[[PACK_OUTPUT:.*]] = tensor.pack %{{.*}} inner_dims_pos = [3] inner_tiles = [4]
  // CHECK-SAME:   : tensor<?x1x?x?xf32> -> tensor<?x1x?x?x4xf32>

  //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "reduction"]}
  // CHECK-SAME:  ins(%{{.*}} : tensor<?x1x?x?x6xf32>, tensor<1x?x?x?x4x6xf32>)
  // CHECK-SAME: outs(%{{.*}} : tensor<?x1x?x?x4xf32>)
  %0 = linalg.conv_2d_nhwc_hwcf
     ins (%input, %filter: tensor<?x1x?x?xf32>, tensor<1x?x?x?xf32>)
    outs (%init: tensor<?x1x?x?xf32>) -> tensor<?x1x?x?xf32>

  //      CHECK: tensor.unpack %{{.*}} inner_dims_pos = [3] inner_tiles = [4]
  // CHECK-SAME:   : tensor<?x1x?x?x4xf32> -> tensor<?x1x?x?xf32>
  return %0 : tensor<?x1x?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
    //                                            N  H  W  F KH KW  C
    %1 = transform.structured.pack %0 packed_sizes = [0, 0, 0, 4, 0, 0, 6]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}

// -----
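// Packed sizes may be dynamic: here N and K are packed by an SSA value
// produced by an unregistered "some_tile_size" op, so every packed tensor has
// dynamic inner tiles.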

// CHECK-DAG: affine_map<()[s0, s1] -> (s0 ceildiv s1)>
//                                                M   N   K    n   k      M   K   k
// CHECK-DAG: #[[$PACKED_MAP_0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d2, d4)>
//                                                                        K   N   n   k
// CHECK-DAG: #[[$PACKED_MAP_1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d1, d3, d4)>
//                                                                        M   N    n
// CHECK-DAG: #[[$PACKED_MAP_2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3)>

// CHECK-LABEL: @matmul_dynamic_pack_size
//  CHECK-SAME:   %[[A:[0-9a-zA-Z]+]]: tensor<?x?xf32>,
//  CHECK-SAME:   %[[B:[0-9a-zA-Z]+]]: tensor<?x?xf32>,
//  CHECK-SAME:   %[[C:[0-9a-zA-Z]+]]: tensor<?x?xf32>
func.func @matmul_dynamic_pack_size(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> tensor<?x?xf32> {
  //      CHECK: %[[TS:.*]] = "some_tile_size"() : () -> index
  %sz = "some_tile_size"() : () -> (index)

  //      CHECK: %[[PACK_A:.*]] = tensor.pack %[[A]] {{.*}} inner_dims_pos = [1] inner_tiles = [%[[TS]]]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x?xf32>
  //      CHECK: %[[PACK_B:.*]] = tensor.pack %[[B]] {{.*}} inner_dims_pos = [1, 0] inner_tiles = [%[[TS]], %[[TS]]]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
  //      CHECK: %[[PACK_C:.*]] = tensor.pack %[[C]] {{.*}} inner_dims_pos = [1] inner_tiles = [%[[TS]]]
  // CHECK-SAME:   : tensor<?x?xf32> -> tensor<?x?x?xf32>
  //      CHECK: linalg.generic {indexing_maps = [#[[$PACKED_MAP_0]], #[[$PACKED_MAP_1]], #[[$PACKED_MAP_2]]]
  // CHECK-SAME:     iterator_types = ["parallel", "parallel", "reduction", "parallel", "reduction"]}
  // CHECK-SAME:  ins(%{{.*}} : tensor<?x?x?xf32>, tensor<?x?x?x?xf32>)
  // CHECK-SAME: outs(%{{.*}} : tensor<?x?x?xf32>)
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>

  //      CHECK: tensor.unpack %{{.*}} inner_dims_pos = [1] inner_tiles = [%[[TS]]] into %[[C]]
  // CHECK-SAME:   : tensor<?x?x?xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %sz = transform.structured.match ops{["some_tile_size"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.pack %0 packed_sizes = [0, %sz, %sz]
        : (!transform.any_op, !transform.any_op, !transform.any_op) -> (!transform.op<"linalg.generic">)
      transform.yield
  }
}

// -----
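// Packing a spatial dimension (H) of the convolution is rejected and the
// transform reports a data-tiling failure.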

func.func @conv_cant_pack(%i: tensor<14x512x28x28xf32>, %f: tensor<1024x512x1x1xf32>,
                          %o: tensor<14x1024x28x28xf32>) -> tensor<14x1024x28x28xf32> {
  %0 = linalg.conv_2d_nchw_fchw ins(%i, %f: tensor<14x512x28x28xf32>, tensor<1024x512x1x1xf32>)
                                outs(%o: tensor<14x1024x28x28xf32>) -> tensor<14x1024x28x28xf32>
  return %0: tensor<14x1024x28x28xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
    //                                                N  F  H  W  C KH KW
    // expected-error @below {{data tiling failed}}
    %1 = transform.structured.pack %0 packed_sizes = [0, 0, 4, 0, 0, 0, 0]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
    transform.yield
  }
}

// -----
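// transform.structured.pack requires its target handle to map to exactly one
// LinalgOp; here it maps to two matmuls.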

func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> (tensor<?x?xf32>, tensor<?x?xf32>) {
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>
  %1 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>
  return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{requires target to map to exactly 1 LinalgOp (got 2)}}
      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
      transform.yield
  }
}


// -----
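// The number of packed sizes must equal the number of loops of the target op
// (3 for matmul); only 2 are provided here.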

func.func @matmul(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> tensor<?x?xf32> {
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{requires number of packed sizes match the number of loops (2 vs 3)}}
      %1 = transform.structured.pack %0 packed_sizes = [2, 3]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
      transform.yield
  }
}

// -----
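// pack_transpose requires the packing-op handle to map to a single op;
// matching two tensor.pack ops is diagnosed.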

func.func @no_single_packing_op(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) {
  %0 = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  %1 = tensor.unpack %0 inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %source : tensor<4x16x32x16xf32> -> tensor<128x256xf32>
  %2 = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{requires target to map to exactly 1 packing op and 1 packed op (got 2 and 1)}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
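// The packing-op handle of pack_transpose must map to a tensor.pack or
// tensor.unpack; an arith.constant is rejected.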

func.func @no_single_pack_unpack(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) {
  %0 = arith.constant 0 : index
  %1 = tensor.empty() : tensor<f32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["tensor.empty"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{requires target to map to a tensor.pack or tensor.unpack}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
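// The with_compute_op handle of pack_transpose must map to a LinalgOp; an
// arith.constant is rejected.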

func.func @no_linalg_target(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) {
  %0 = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  %1 = arith.constant 0 : index
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{requires a LinalgOp target}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
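// The tensor.pack being transposed must have a single use, and that use must
// be the given LinalgOp; here the linalg.fill does not use the pack result at
// all.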

func.func @no_single_use_by_linalg(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) {
  %0 = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  %f0 = arith.constant 0.0 : f32
  %1 = tensor.empty() : tensor<f32>
  %2 = linalg.fill ins(%f0: f32) outs(%1 : tensor<f32>) -> tensor<f32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{not a single use by the LinalgOp target}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
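// A tensor.unpack being transposed must consume a result produced by the given
// LinalgOp; here its source is a tensor.pack, not the linalg.fill.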

func.func @not_produced_by_linalg(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) {
  %a = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  %b = tensor.unpack %a inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %source : tensor<4x16x32x16xf32> -> tensor<128x256xf32>
  %f0 = arith.constant 0.0 : f32
  %1 = tensor.empty() : tensor<f32>
  %2 = linalg.fill ins(%f0: f32) outs(%1 : tensor<f32>) -> tensor<f32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{not produced by the LinalgOp target}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
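// Transposing a tensor.unpack also requires a matching tensor.pack feeding the
// LinalgOp; the linalg.fill here only writes into a tensor.empty, so no
// matching pack is found.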

func.func @no_matching_pack(%source: tensor<16xf32>) {
  %f0 = arith.constant 0.0 : f32
  %1 = tensor.empty() : tensor<4x4xf32>
  %2 = linalg.fill ins(%f0: f32) outs(%1 : tensor<4x4xf32>) -> tensor<4x4xf32>
  %b = tensor.unpack %2 inner_dims_pos = [0] inner_tiles = [4] into %source : tensor<4x4xf32> -> tensor<16xf32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      // expected-error @below {{could not find matching pack op}}
      transform.structured.pack_transpose %0 with_compute_op(%1)
      inner_perm = [0]
        : (!transform.any_op, !transform.any_op)
        -> (!transform.any_op, !transform.any_op, !transform.any_op)
      transform.yield
  }
}

// -----
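// outer_perm must be a full permutation of the packed op's outer dimensions;
// a single-element permutation on a 2-D outer shape is invalid.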

func.func @invalid_outer_perm(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> tensor<?x?xf32> {
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)

      %unpack = transform.get_consumers_of_result %1[0]
        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
      %2, %pack_2, %unpack_2 =
        // expected-error @below {{invalid outer_perm}}
        transform.structured.pack_transpose %unpack with_compute_op(%1)
        outer_perm = [1]
        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
      transform.yield
  }
}

// -----
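// Likewise, inner_perm must be a full permutation of the packed op's inner
// dimensions.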

func.func @invalid_inner_perm(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>)
    -> tensor<?x?xf32> {
  %0 = linalg.matmul  ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%C: tensor<?x?xf32>)
    -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.pack %0 packed_sizes = [2, 3, 4]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)

      %unpack = transform.get_consumers_of_result %1[0]
        : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.unpack">)
      %2, %pack_2, %unpack_2 =
        // expected-error @below {{invalid inner_perm}}
        transform.structured.pack_transpose %unpack with_compute_op(%1)
        inner_perm = [1]
        : (!transform.op<"tensor.unpack">, !transform.op<"linalg.generic">)
        -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.op<"tensor.unpack">)
      transform.yield
  }
}

// -----
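// When the static sizes divide the packed sizes evenly, the generated
// tensor.pack ops carry no padding_value. The pack of B is additionally
// transposed with outer_perm = [1, 0] and inner_perm = [1, 0].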

func.func @no_padding_on_packs(%A: tensor<32x32xf32>, %B: tensor<32x32xf32>, %C: tensor<32x32xf32>)
    -> tensor<32x32xf32> {
  %0 = linalg.matmul  ins(%A, %B: tensor<32x32xf32>, tensor<32x32xf32>)
                     outs(%C: tensor<32x32xf32>)
    -> tensor<32x32xf32>
  return %0 : tensor<32x32xf32>
}

// CHECK-LABEL: no_padding_on_packs
// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8]
// CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<8x4x4x8xf32>
// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0]
// CHECK-SAME:  inner_dims_pos = [0, 1] inner_tiles = [8, 8]
// CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x4x8x8xf32>
// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [4, 8]
// CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<8x4x4x8xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
      %1 = transform.structured.pack %0 packed_sizes = [4, 8, 8]
        : (!transform.any_op) -> (!transform.op<"linalg.generic">)
      %pack = transform.get_producer_of_operand %1[1]
      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
      %2, %pack_2, %empty_unpack_2 =
      transform.structured.pack_transpose %pack with_compute_op(%1)
      outer_perm = [1, 0] inner_perm = [1, 0]
       : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
      transform.yield
  }
}