// RUN: mlir-opt %s -transform-interpreter --split-input-file | FileCheck %s

!A_mk = tensor<1023x255xf32>
!B_kn = tensor<255x127xf32>
!C_mn = tensor<1023x127xf32>

// Normalized dims are:                     ( k,  m,  n)(kk, mm, nn)
// CHECK-DAG: #[[$mk_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d0, d3, d4)>
// CHECK-DAG: #[[$kn_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>
// CHECK-DAG: #[[$mn_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d2, d4, d5)>
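// The packed sizes [8, 16, 32] tile m, n and k respectively. None of 1023, 127 and
// 255 is a multiple of its tile size, so each operand is padded to the next
// multiple first (1023 -> 1024, 127 -> 128, 255 -> 256), giving the packed shapes
// checked below:
//   A: 1023x255 -> 128x8x32x8  (m/8 x k/32 x kk x mm)
//   B:  255x127 -> 8x8x32x16   (k/32 x n/16 x kk x nn)
//   C: 1023x127 -> 128x8x8x16  (m/8 x n/16 x mm x nn)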

// CHECK-LABEL: @matmul_mk_kn_mn(
func.func @matmul_mk_kn_mn(%A : !A_mk, %B : !B_kn, %C : !C_mn) -> !C_mn {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<128x8x8x16xf32>)
  %0 = linalg.matmul ins(%A, %B : !A_mk, !B_kn) outs(%C : !C_mn) -> !C_mn
  return %0 : !C_mn
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %module_op
      : (!transform.any_op) -> !transform.op<"linalg.matmul">
    transform.structured.pack_greedily %matmul
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A_mk = tensor<1023x255xf32>
!B_nk = tensor<127x255xf32>
!C_nm = tensor<127x1023xf32>

#mkn_accesses = [
  affine_map<(m, n, k) -> (m, k)>,
  affine_map<(m, n, k) -> (n, k)>,
  affine_map<(m, n, k) -> (n, m)>
]
#mkn_trait = {
  indexing_maps = #mkn_accesses,
  iterator_types = ["parallel", "parallel", "reduction"]
}

// Normalized dims are:                     ( k,  m,  n)(kk, mm, nn)
// CHECK-DAG: #[[$mk_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d0, d3, d4)>
// CHECK-DAG: #[[$kn_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d0, d3, d5)>
// CHECK-DAG: #[[$mn_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d4, d5)>
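// Same packed sizes as above; only the operand layouts differ (B is nk, C is nm),
// so the packed B becomes n/16 x k/32 x kk x nn = 8x8x32x16 and the packed C
// becomes n/16 x m/8 x mm x nn = 8x128x8x16, as checked below.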

// CHECK-LABEL: @matmul_mk_nk_nm(
func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<8x128x8x16xf32>)
  %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %d = arith.mulf %a, %b : f32
      %e = arith.addf %c, %d : f32
      linalg.yield %e : f32
  } -> !C_nm
  return %0 : !C_nm
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A_mk = tensor<1023x255xf32>
!B_nk = tensor<127x255xf32>
!C_nm = tensor<127x1023xf32>

#mkn_accesses = [
  affine_map<(k, m, n) -> (m, k)>,
  affine_map<(k, m, n) -> (n, k)>,
  affine_map<(k, m, n) -> (n, m)>
]
#mkn_trait = {
  indexing_maps = #mkn_accesses,
  iterator_types = ["reduction", "parallel", "parallel"]
}

// Normalized dims are:                     ( k,  m,  n)(kk, mm, nn)
// CHECK-DAG: #[[$mk_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d0, d3, d4)>
// CHECK-DAG: #[[$kn_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d0, d3, d5)>
// CHECK-DAG: #[[$mn_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d4, d5)>
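// Identical to the test above except that the source generic lists its iterators
// in (k, m, n) order; packing normalizes the loop order, so the packed op, maps
// and shapes checked below are the same as in the previous test.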

// CHECK-LABEL: @matmul_mk_nk_nm_transposed(
func.func @matmul_mk_nk_nm_transposed(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<8x8x32x16xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<8x128x8x16xf32>)
  %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %d = arith.mulf %a, %b : f32
      %e = arith.addf %c, %d : f32
      linalg.yield %e : f32
  } -> !C_nm
  return %0 : !C_nm
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A_bmkm2 = tensor<42x1023x255x33xf32>
!B_nkb = tensor<127x255x42xf32>
!C_nbm = tensor<127x42x1023xf32>

#mkn_accesses = [
  affine_map<(k, m, n, b, m2) -> (b, m, k, m2)>,
  affine_map<(k, m, n, b, m2) -> (n, k, b)>,
  affine_map<(k, m, n, b, m2) -> (n, b, m)>
]
#mkn_trait = {
  indexing_maps = #mkn_accesses,
  iterator_types = ["reduction", "parallel", "parallel", "parallel", "parallel"]
}

// Normalized dims are:                        ( ?,  ?,  k,  m,  n)(kk, mm, nn)
// CHECK-DAG: #[[$bmkm2_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d3, d2, d1, d5, d6)>
// CHECK-DAG:   #[[$nkb_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d4, d2, d0, d5, d7)>
// CHECK-DAG:   #[[$nbm_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d4, d0, d3, d6, d7)>
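// The batch dimension b (42) and the extra parallel dimension m2 (33) are not part
// of the embedded matmul and are left unpacked; only m, n and k get inner tiles
// (mm = 8, nn = 16, kk = 32), as the packed shapes below show.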

// CHECK-LABEL: @contraction_bmkm2_nkb_nbm(
func.func @contraction_bmkm2_nkb_nbm(%A : !A_bmkm2, %B : !B_nkb, %C : !C_nbm) -> !C_nbm {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$bmkm2_kkmm]], #[[$nkb_kknn]], #[[$nbm_mmnn]]]
  // CHECK-SAME:   ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<42x128x8x33x32x8xf32>, tensor<8x8x42x32x16xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<8x42x128x8x16xf32>)
  %0 = linalg.generic #mkn_trait ins(%A, %B : !A_bmkm2, !B_nkb) outs(%C : !C_nbm) {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %d = arith.mulf %a, %b : f32
      %e = arith.addf %c, %d : f32
      linalg.yield %e : f32
  } -> !C_nbm
  return %0 : !C_nbm
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

// Conv lingo:                            h   w  kh  kw   c   n   f  cc  nn  ff
// Normalized dims are:                ( ?,  ?,  ?,  ?,  k,  m,  n)(kk, mm, nn)
//                                                                                   n   c   h + kh   w + kw  cc  nn
// CHECK-DAG: #[[$M1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d5, d4, d0 + d2, d1 + d3, d7, d8)>
//                                                                                   f   c  kh  kw  cc  ff
// CHECK-DAG: #[[$M2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d6, d4, d2, d3, d7, d9)>
//                                                                                   n   f   h   w  nn  ff
// CHECK-DAG: #[[$M3:.*]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d5, d6, d0, d1, d8, d9)>
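// In matmul terms the input channel c (47) plays k and is packed by 32 (after
// padding 47 to 64, hence c_outer = 2), the image batch n plays m and is packed
// by 8, and the filter count f (16) plays n and is packed by 16 (f_outer = 1).
// The spatial dims h, w, kh and kw are left untouched.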

// CHECK-LABEL: @conv_2d_nchw_fchw
func.func @conv_2d_nchw_fchw(%arg0: tensor<?x47x16x16xf32>, %arg2: tensor<?x16x14x14xf32>) -> tensor<?x16x14x14xf32> {
  %c0 = arith.constant dense<0.1> : tensor<16x47x3x3xf32>
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$M1]], #[[$M2]], #[[$M3]]]
  // CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction", "reduction", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]
  // CHECK-SAME:  ins(%{{.*}} : tensor<?x2x16x16x32x8xf32>, tensor<1x2x3x3x32x16xf32>)
  // CHECK-SAME: outs(%{{.*}} : tensor<?x1x14x14x8x16xf32>)
  %0 = linalg.conv_2d_nchw_fchw
    {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
     ins(%arg0, %c0: tensor<?x47x16x16xf32>, tensor<16x47x3x3xf32>)
    outs(%arg2: tensor<?x16x14x14xf32>) -> tensor<?x16x14x14xf32>
  return %0 : tensor<?x16x14x14xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %conv = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %module_op
      : (!transform.any_op) -> !transform.op<"linalg.conv_2d_nchw_fchw">
    transform.structured.pack_greedily %conv
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.conv_2d_nchw_fchw">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

// These should fail to pack for now as they don't contain a contraction.
// CHECK-LABEL: @reduce_and_map
func.func @reduce_and_map(%arg0: tensor<10x100xf32>,
    %arg1: tensor<10x100xf32>, %output: tensor<10xf32>) -> tensor<10xf32> {
  %map_init = tensor.empty() : tensor<10x100xf32>
  // CHECK: linalg.map
  %mapped = linalg.map { arith.addf }
              ins(%arg0, %arg1 : tensor<10x100xf32>, tensor<10x100xf32>)
              outs(%map_init : tensor<10x100xf32>)
  // CHECK: linalg.reduce
  %res = linalg.reduce { arith.addf }
           ins(%mapped: tensor<10x100xf32>)
           outs(%output: tensor<10xf32>)
           dimensions = [1]
  return %res : tensor<10xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A_mk = tensor<1023x255xf32>
!B_nk = tensor<127x255xf32>
!C_nm = tensor<127x1023xf32>

#mkn_accesses = [
  affine_map<(m, n, k) -> (m, k)>,
  affine_map<(m, n, k) -> (n, k)>,
  affine_map<(m, n, k) -> (n, m)>
]
#mkn_trait = {
  indexing_maps = #mkn_accesses,
  iterator_types = ["parallel", "parallel", "reduction"]
}

// Normalized dims are:                     ( k,  m,  n)(kk, mm, nn)
// CHECK-DAG: #[[$mk_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d1, d0, d3, d4)>
// CHECK-DAG: #[[$kn_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d0, d3, d5)>
// CHECK-DAG: #[[$mn_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d4, d5)>
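// Here only m (by 8) and k (by 32) are tiled; n = 127 is not packed but padded to
// the next multiple of 10 (130), which shows up as an outer n dimension of 1 and
// an inner size of 130 in the packed B and C below.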

// CHECK-LABEL: @matmul_mk_nk_nm(
func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<128x8x32x8xf32>, tensor<1x8x32x130xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<1x128x8x130xf32>)
  %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %d = arith.mulf %a, %b : f32
      %e = arith.addf %c, %d : f32
      linalg.yield %e : f32
  } -> !C_nm
  return %0 : !C_nm
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        // In this spec, the "n" dimension is not packed but rather padded to the
        // next multiple of 10 (i.e. 130).
        matmul_packed_sizes = [8, 0, 32]
        matmul_padded_sizes_next_multiple_of = [0, 10, 0]
        matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A_mk = tensor<1023x255xf32>
!B_nk = tensor<127x255xf32>
!C_nm = tensor<127x1023xf32>

#mkn_accesses = [
  affine_map<(m, n, k) -> (m, k)>,
  affine_map<(m, n, k) -> (n, k)>,
  affine_map<(m, n, k) -> (n, m)>
]
#mkn_trait = {
  indexing_maps = #mkn_accesses,
  iterator_types = ["parallel", "parallel", "reduction"]
}

// Normalized dims are:                     ( k,  m,  n)(kk, nn)
// CHECK-DAG: #[[$mk_kkmm:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d1, d0, d3)>
// CHECK-DAG: #[[$kn_kknn:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d0, d3, d4)>
// CHECK-DAG: #[[$mn_mmnn:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d1, d4)>
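// Only k is tiled (kk = 32) and n is padded from 127 to 130; m is left untouched,
// so the packed shapes below are A: 1023x8x32, B: 1x8x32x130 and C: 1x1023x130,
// and the packed generic has 5 loops instead of 6.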

// CHECK-LABEL: @matmul_mk_nk_nm(
func.func @matmul_mk_nk_nm(%A : !A_mk, %B : !B_nk, %C : !C_nm) -> !C_nm {
  //      CHECK: linalg.generic
  // CHECK-SAME: indexing_maps = [#[[$mk_kkmm]], #[[$kn_kknn]], #[[$mn_mmnn]]]
  // CHECK-SAME:   ["reduction", "parallel", "parallel", "reduction", "parallel"]}
  // CHECK-SAME:   ins(%{{.*}} : tensor<1023x8x32xf32>, tensor<1x8x32x130xf32>)
  // CHECK-SAME:  outs(%{{.*}} : tensor<1x1023x130xf32>)
  %0 = linalg.generic #mkn_trait ins(%A, %B : !A_mk, !B_nk) outs(%C : !C_nm) {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %d = arith.mulf %a, %b : f32
      %e = arith.addf %c, %d : f32
      linalg.yield %e : f32
  } -> !C_nm
  return %0 : !C_nm
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.op<"linalg.generic">
    transform.structured.pack_greedily %generic
        // In this spec, the "m" dimension is neither packed nor padded. We don't
        // end up with an innermost matmul after packing, only with an innermost
        // matvec.
        matmul_packed_sizes = [0, 0, 32]
        matmul_padded_sizes_next_multiple_of = [0, 10, 0]
        matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.generic">) -> !transform.op<"linalg.generic">
    transform.yield
  }
}

// -----

!A = tensor<1023x255xf32>
!X = tensor<255xf32>
!Y = tensor<1023xf32>
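// linalg.matvec has no "n" dimension, so the matmul packing spec below does not
// apply and the op is expected to be left unchanged.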

// CHECK-LABEL: @matvec_fail(
func.func @matvec_fail(%A : !A, %x : !X, %y : !Y) -> !Y {
  //      CHECK: linalg.matvec
  %0 = linalg.matvec ins(%A, %x : !A, !X) outs(%y : !Y) -> !Y
  return %0 : !Y
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matvec"]} in %module_op
      : (!transform.any_op) -> !transform.op<"linalg.matvec">
    transform.structured.pack_greedily %matmul
        matmul_packed_sizes = [8, 16, 32] matmul_inner_dims_order = [1, 2, 0]
      : (!transform.op<"linalg.matvec">) -> !transform.any_op
    transform.yield
  }
}

// -----

func.func @no_padding_on_packs(%A: tensor<32x32xf32>, %B: tensor<32x32xf32>, %C: tensor<32x32xf32>)
    -> tensor<32x32xf32> {
  %0 = linalg.matmul  ins(%A, %B: tensor<32x32xf32>, tensor<32x32xf32>)
                     outs(%C: tensor<32x32xf32>)
    -> tensor<32x32xf32>
  return %0 : tensor<32x32xf32>
}

// CHECK-LABEL: no_padding_on_packs
// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 4]
// CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x8x8x4xf32>
// CHECK: tensor.pack %{{.+}} outer_dims_perm = [1, 0]
// CHECK-SAME:  inner_dims_pos = [0, 1] inner_tiles = [4, 16] into %{{.+}} : tensor<32x32xf32> -> tensor<2x8x4x16xf32>
// CHECK: tensor.pack %{{.+}} inner_dims_pos = [0, 1] inner_tiles = [8, 16]
// CHECK-SAME:  into %{{.+}} : tensor<32x32xf32> -> tensor<4x2x8x16xf32>
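// Note: the tile sizes 8, 16 and 4 all divide 32, so none of the packs above needs
// a padding_value (hence the test name). The pack_transpose at the end of the
// sequence permutes the outer and inner dims of the RHS pack, which is why the
// second tensor.pack carries outer_dims_perm = [1, 0].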

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.op<"linalg.matmul">
    %1 = transform.structured.pack_greedily %0
        matmul_packed_sizes = [8, 16, 4] matmul_inner_dims_order = [0, 1, 2]
      : (!transform.op<"linalg.matmul">) -> !transform.op<"linalg.generic">
    %pack = transform.get_producer_of_operand %1[1]
      : (!transform.op<"linalg.generic">) -> (!transform.op<"tensor.pack">)
    %2, %pack_2, %empty_unpack_2 =
      transform.structured.pack_transpose %pack with_compute_op(%1)
        outer_perm = [1, 0] inner_perm = [1, 0]
      : (!transform.op<"tensor.pack">, !transform.op<"linalg.generic">)
      -> (!transform.op<"linalg.generic">, !transform.op<"tensor.pack">, !transform.any_op)
    transform.yield
  }
}
405