// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s

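// Each test is paired with a transform dialect script: @__transform_main
// matches the op under test and either vectorizes it directly
// (transform.structured.vectorize) or walks up to the isolated-from-above
// parent and vectorizes every child while running cleanup rewrites
// (transform.structured.vectorize_children_and_apply_patterns), optionally
// with some pattern sets disabled so the raw vectorized form stays visible.

// linalg.dot: the 1-D contraction vectorizes to an elementwise multiply
// followed by an additive multi_reduction down to a scalar.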
// CHECK-LABEL: contraction_dot
func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {

// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584xf32>
// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [0] : vector<1584xf32> to f32
  linalg.dot ins(%A, %B: memref<1584xf32>, memref<1584xf32>)
            outs(%C: memref<f32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

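// The next three contraction tests (matvec, matmul, batch_matmul) keep the
// multi_reduction-to-contract rewrites disabled, so each case shows the
// elementwise multiply plus a multi_reduction over the reduction dimension.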
// CHECK-LABEL: contraction_matvec
func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: memref<1584xf32>) {

// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584xf32>
// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [1] : vector<1584x1584xf32> to vector<1584xf32>
  linalg.matvec ins(%A, %B: memref<1584x1584xf32>, memref<1584xf32>)
            outs(%C: memref<1584xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: contraction_matmul
func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %C: memref<1584x1584xf32>) {
// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584xf32>
// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<1584x1584x1584xf32> to vector<1584x1584xf32>
  linalg.matmul ins(%A, %B: memref<1584x1584xf32>, memref<1584x1584xf32>)
            outs(%C: memref<1584x1584xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: contraction_batch_matmul
func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) {
// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584x1584xf32>
// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [3] : vector<1584x1584x1584x1584xf32> to vector<1584x1584x1584xf32>
  linalg.batch_matmul
    ins(%A, %B: memref<1584x1584x1584xf32>, memref<1584x1584x1584xf32>)
   outs(%C: memref<1584x1584x1584xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

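// linalg.contract carries explicit indexing maps; with the contraction
// patterns left enabled, vectorization produces a single vector.contract.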
// CHECK-LABEL: @matmul_as_contract
// CHECK-SAME: %[[A:.*]]: tensor<24x12xf32>
// CHECK-SAME: %[[B:.*]]: tensor<12x25xf32>
// CHECK-SAME: %[[C:.*]]: tensor<24x25xf32>
func.func @matmul_as_contract(%A: tensor<24x12xf32>,
                              %B: tensor<12x25xf32>,
                              %C: tensor<24x25xf32>) -> tensor<24x25xf32> {
  // CHECK: %[[vA:.+]] = vector.transfer_read %[[A]]
  // CHECK: %[[vB:.+]] = vector.transfer_read %[[B]]
  // CHECK: %[[vC:.+]] = vector.transfer_read %[[C]]
  // CHECK: %[[vR:.+]] = vector.contract {{.*}} %[[vA]], %[[vB]], %[[vC]]
  // CHECK: vector.transfer_write %[[vR]], %[[C]]
  %0 = linalg.contract
      indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
                       affine_map<(m, n, k) -> (k, n)>,
                       affine_map<(m, n, k) -> (m, n)>]
      ins(%A, %B : tensor<24x12xf32>, tensor<12x25xf32>)
      outs(%C : tensor<24x25xf32>) -> tensor<24x25xf32>
  func.return %0 : tensor<24x25xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.contract"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    // TODO: also test the other available vectorization strategies.
    transform.yield
  }
}

// -----

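// A linalg.generic with matmul semantics: the inputs are read into the full
// 8x32x16 iteration space (broadcast along the missing dimension) and the
// reduction is performed by vector.multi_reduction along dim 2.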
#matmul_trait = {
  indexing_maps = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (m, n)>
  ],
  iterator_types = ["parallel", "parallel", "reduction"]
}

// CHECK-LABEL: func @vectorization_test
func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                         %C: memref<8x32xf32>) {
  //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
  //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
  //       CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xf32>, vector<8x32xf32>
  //       CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
  //       CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32>
  //       CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<8x32xf32>
  linalg.generic #matmul_trait
    ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>)
   outs(%C : memref<8x32xf32>) {
    ^bb(%a: f32, %b: f32, %c: f32) :
      %d = arith.mulf %a, %b: f32
      %e = arith.addf %c, %d: f32
      linalg.yield %e : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

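// 0-d tensors: each operand is read as a vector<f32>, scalars are extracted
// for the body computation, and the result is broadcast back to vector<f32>
// before the transfer_write.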
#map = affine_map<() -> ()>

// CHECK-LABEL:   func.func @generic_0d(
// CHECK-SAME:     %[[ARG_0:.*]]: tensor<f32>, %[[ARG_1:.*]]: tensor<f32>, %[[ARG_2:.*]]: tensor<f32>)
func.func @generic_0d(%arg0: tensor<f32>, %arg1: tensor<f32>,
                      %arg2: tensor<f32>) -> tensor<f32> {
// CHECK:           %[[PAD:.*]] = arith.constant 0.000000e+00 : f32
// CHECK:           %[[READ_0:.*]] = vector.transfer_read %[[ARG_0]][], %[[PAD]] : tensor<f32>, vector<f32>
// CHECK:           %[[ARG_0_AS_SCALAR:.*]] = vector.extract %[[READ_0]][] : f32 from vector<f32>
// CHECK:           %[[READ_1:.*]] = vector.transfer_read %[[ARG_1]][], %[[PAD]] : tensor<f32>, vector<f32>
// CHECK:           %[[ARG_1_AS_SCALAR:.*]] = vector.extract %[[READ_1]][] : f32 from vector<f32>
// CHECK:           %[[READ_2:.*]] = vector.transfer_read %[[ARG_2]][], %[[PAD]] : tensor<f32>, vector<f32>
// CHECK:           %[[ARG_2_AS_SCALAR:.*]] = vector.extract %[[READ_2]][] : f32 from vector<f32>
// CHECK:           %[[MULF:.*]] = arith.mulf %[[ARG_0_AS_SCALAR]], %[[ARG_1_AS_SCALAR]] : f32
// CHECK:           %[[ADDF:.*]] = arith.addf %[[ARG_2_AS_SCALAR]], %[[MULF]] : f32
// CHECK:           %[[ADDF_BCAST:.*]] = vector.broadcast %[[ADDF]] : f32 to vector<f32>
// CHECK:           vector.transfer_write %[[ADDF_BCAST]], %[[ARG_2]][] : vector<f32>, tensor<f32>
  %res = linalg.generic {
    indexing_maps = [#map, #map, #map],
    iterator_types = []
  } ins(%arg0, %arg1 : tensor<f32>, tensor<f32>)
    outs(%arg2 : tensor<f32>) {
  ^bb(%a: f32, %b: f32, %c: f32) :
    %d = arith.mulf %a, %b: f32
    %e = arith.addf %c, %d: f32
    linalg.yield %e : f32
  } -> tensor<f32>

  return %res : tensor<f32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

#matmul_transpose_out_trait = {
  indexing_maps = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (n, m)>
  ],
  iterator_types = ["parallel", "parallel", "reduction"]
}

// CHECK-LABEL: func @generic_output_transpose
func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                                    %C: memref<32x8xf32>) {
  //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
  //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
  //       CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<32x8xf32>, vector<8x32xf32>
  //       CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
  //       CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32>
  //       CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<32x8xf32>
  linalg.generic #matmul_transpose_out_trait
    ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>)
   outs(%C : memref<32x8xf32>) {
    ^bb(%a: f32, %b: f32, %c: f32) :
      %d = arith.mulf %a, %b: f32
      %e = arith.addf %c, %d: f32
      linalg.yield %e : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

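// A pure transpose expressed through the indexing maps: no vector.transpose
// is created; the permutation is folded into the transfer_write's
// permutation_map.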
#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
// CHECK: func @generic_interchanged_transpose
func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tensor<128x12x32xf32> {
  // CHECK: %[[IN:.+]] = vector.transfer_read
  // CHECK: vector.transfer_write %[[IN]], {{.+}} permutation_map = #[[MAP]]
  %0 = tensor.empty() : tensor<128x12x32xf32>
  %1 = linalg.generic {indexing_maps = [#map0, #map1],
                       iterator_types = ["parallel", "parallel", "parallel"]}
    ins(%arg0 : tensor<12x128x32xf32>)
    outs(%0 : tensor<128x12x32xf32>) {
  ^bb0(%arg1: f32, %arg2: f32):
    linalg.yield %arg1 : f32
  } -> tensor<128x12x32xf32>
  return %1 : tensor<128x12x32xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

#matmul_trait = {
  indexing_maps = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (m, n)>
  ],
  iterator_types = ["parallel", "parallel", "reduction"]
}

// CHECK-LABEL: func @vectorization_test_integer
func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>,
                                 %C: memref<8x32xi32>) {
  //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xi32>, vector<8x32x16xi32>
  //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xi32>, vector<8x32x16xi32>
  //       CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xi32>, vector<8x32xi32>
  //       CHECK: %[[MUL:.*]] = arith.muli %{{.*}}, %{{.*}} : vector<8x32x16xi32>
  //       CHECK: vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xi32> to vector<8x32xi32>
  //       CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xi32>, memref<8x32xi32>
  linalg.generic #matmul_trait
    ins(%A, %B : memref<8x16xi32>, memref<16x32xi32>)
   outs(%C : memref<8x32xi32>) {
    ^bb(%a: i32, %b: i32, %c: i32) :
      %d = arith.muli %a, %b: i32
      %e = arith.addi %c, %d: i32
      linalg.yield %e : i32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @vectorization_test_2
func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                         %C: memref<8x32xf32>) {
  //       CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
  //       CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32>
  linalg.matmul
    ins(%A, %B: memref<8x16xf32>, memref<16x32xf32>)
   outs(%C: memref<8x32xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

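// A scalar input with an empty indexing map is broadcast to the output shape
// rather than read through a vector transfer.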
// CHECK-LABEL: func @test_vectorize_scalar_input
func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
  //       CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
  //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
  linalg.generic {
    indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>],
    iterator_types = ["parallel", "parallel"]}
   ins(%arg0 : f32)
  outs(%A: memref<8x16xf32>) {
    ^bb(%0: f32, %1: f32) :
      linalg.yield %0 : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

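// complex<f32> is not a valid vector element type, so vectorization must
// leave this op untouched.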
// CHECK-LABEL: func @test_do_not_vectorize_unsupported_element_types
func.func @test_do_not_vectorize_unsupported_element_types(%A : memref<8x16xcomplex<f32>>, %arg0 : complex<f32>) {
  // CHECK-NOT: vector.broadcast
  // CHECK-NOT: vector.transfer_write
  linalg.generic {
    indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>],
    iterator_types = ["parallel", "parallel"]}
   ins(%arg0 : complex<f32>)
  outs(%A: memref<8x16xcomplex<f32>>) {
    ^bb(%0: complex<f32>, %1: complex<f32>) :
      linalg.yield %0 : complex<f32>
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

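// Chains of affine.apply on linalg.index results are composed and folded
// during vectorization: the constant part becomes a dense index vector and
// the remaining additions become vector arith.addi ops.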
#map0 = affine_map<(d0) -> (d0)>

func.func @vectorize_affine_apply(%arg0: tensor<5xf32>, %arg3: index) -> tensor<5xi32> {
  %0 = tensor.empty() : tensor<5xi32>
  %1 = linalg.generic {indexing_maps = [#map0, #map0],
                       iterator_types = ["parallel"]}
    ins(%arg0 : tensor<5xf32>)
    outs(%0 : tensor<5xi32>) {
  ^bb0(%arg1: f32, %arg2: i32):
    %2 = linalg.index 0 : index
    %11 = affine.apply affine_map<() -> (123)>()
    %12 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %11)
    %13 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%12)[%arg3]
    %14 = affine.apply affine_map<(d0) -> (d0 + 1)>(%13)
    %15 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 + d2)>(%13, %14, %12)
    %3 = arith.index_cast %15 : index to i32
    linalg.yield %3 : i32
  } -> tensor<5xi32>
  return %1 : tensor<5xi32>
}

// CHECK-LABEL:  func.func @vectorize_affine_apply
// CHECK-SAME: %arg0: tensor<5xf32>
// CHECK-SAME: %[[ARG1:.*]]: index
// CHECK-DAG: %[[CST:.*]] = arith.constant dense<[123, 124, 125, 126, 127]> : vector<5xindex>
// CHECK-DAG: %[[CST_0:.*]] = arith.constant dense<1> : vector<5xindex>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK:   %[[EMPTY:.*]] = tensor.empty() : tensor<5xi32>
// CHECK:   %[[BCAST:.*]] = vector.broadcast %[[ARG1]] : index to vector<5xindex>
// CHECK:   %[[ADDI_1:.*]] = arith.addi %[[BCAST]], %[[CST]] : vector<5xindex>
// CHECK:   %[[ADDI_2:.*]] = arith.addi %[[ADDI_1]], %[[CST_0]] : vector<5xindex>
// CHECK:   %[[ADDI_3:.*]] = arith.addi %[[ADDI_1]], %[[ADDI_2]] : vector<5xindex>
// CHECK:   %[[ADDI_4:.*]] = arith.addi %[[ADDI_3]], %[[CST]] : vector<5xindex>
// CHECK:   %[[CAST:.*]] = arith.index_cast %[[ADDI_4]] : vector<5xindex> to vector<5xi32>
// CHECK:   vector.transfer_write %[[CAST]], %[[EMPTY]][%[[C0:.*]]] {in_bounds = [true]} : vector<5xi32>, tensor<5xi32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

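// linalg.fill vectorizes to a broadcast of the fill value plus a single
// transfer_write; the 0-d variant below follows the same shape.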
// CHECK-LABEL: func @test_vectorize_fill
func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
  //       CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
  //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
  linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @test_vectorize_fill_0d
func.func @test_vectorize_fill_0d(%A : memref<f32>, %arg0 : f32) {
  // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32)
  //      CHECK:   %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
  //      CHECK:   vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32>
  linalg.fill ins(%arg0 : f32) outs(%A : memref<f32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

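// memref.copy vectorizes to a transfer_read/transfer_write pair. The 0-d
// variant goes through an extract + broadcast, and complex element types are
// rejected entirely.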
// CHECK-LABEL: func @test_vectorize_copy
func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
  //       CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32>
  //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
  memref.copy %A, %B : memref<8x16xf32> to memref<8x16xf32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @test_vectorize_copy_0d
func.func @test_vectorize_copy_0d(%A : memref<f32>, %B : memref<f32>) {
  //  CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>)
  //       CHECK:   %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32>
  //       CHECK:   %[[val:.*]] = vector.extract %[[V]][] : f32 from vector<f32>
  //       CHECK:   %[[VV:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
  //       CHECK:   vector.transfer_write %[[VV]], %[[B]][] : vector<f32>, memref<f32>
  memref.copy %A, %B : memref<f32> to memref<f32>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @test_vectorize_copy_complex
// CHECK-NOT: vector<
func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memref<8x16xcomplex<f32>>) {
  memref.copy %A, %B : memref<8x16xcomplex<f32>> to memref<8x16xcomplex<f32>>
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

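// linalg.index on the trailing (contiguous) dimension becomes a dense iota
// constant plus a broadcast; indexing a non-trailing dimension additionally
// needs a vector.transpose to place the indices correctly.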
// CHECK-LABEL: func @test_vectorize_trailing_index
  //  CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>)
func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
  //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex>
  //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
  linalg.generic {
    indexing_maps = [
      affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
    iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
  outs(%arg0: memref<1x2x4x8xindex>) {
  ^bb0(%arg1: index):
  //       CHECK:   %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<8xindex> to vector<1x2x4x8xindex>
  //       CHECK:   vector.transfer_write %[[BCST]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex>
    %0 = linalg.index 3 : index
    linalg.yield %0 : index
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @test_vectorize_inner_index
  //  CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>)
func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
  //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<[0, 1]> : vector<2xindex>
  //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
  linalg.generic {
    indexing_maps = [
      affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
    iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
  outs(%arg0: memref<1x2x4x8xindex>) {
  ^bb0(%arg1: index):
  //       CHECK:   %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<2xindex> to vector<1x8x4x2xindex>
  //       CHECK:   %[[TRAN:.*]] = vector.transpose %[[BCST]], [0, 3, 2, 1] : vector<1x8x4x2xindex> to vector<1x2x4x8xindex>
  //       CHECK:   vector.transfer_write %[[TRAN]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex>
    %0 = linalg.index 1 : index
    linalg.yield %0 : index
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

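// A generic with ten outputs exercising a mix of elementwise arith/math ops,
// scalar broadcasts and constants: every body op maps 1:1 onto its vector
// counterpart and each yielded value gets its own transfer_write. The next
// test repeats the same payload on tensors, where the transfer_writes produce
// the returned values.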
588
589// CHECK-LABEL: func @generic_vectorize
590  //  CHECK-SAME: (%[[ARG0:.*]]: memref<4x256xf32>, %[[ARG1:.*]]: memref<4x256xf32>,
591  //  CHECK-SAME:  %[[ARG2:.*]]: memref<256xf32>, %[[ARG3:.*]]: f32)
592func.func @generic_vectorize(%arg0: memref<4x256xf32>,
593                        %arg1: memref<4x256xf32>,
594                        %arg2: memref<256xf32>, %i: f32) {
595  //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32>
596  //   CHECK-DAG:   %[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32>
597  //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
598  %c1_f32 = arith.constant 1.0 : f32
599  linalg.generic {
600    indexing_maps = [
601      affine_map<(d0, d1) -> (d0, d1)>,
602      affine_map<(d0, d1) -> (d1)>,
603      affine_map<(d0, d1) -> (d0, d1)>,
604      affine_map<(d0, d1) -> (d0, d1)>,
605      affine_map<(d0, d1) -> (d0, d1)>,
606      affine_map<(d0, d1) -> (d0, d1)>,
607      affine_map<(d0, d1) -> (d0, d1)>,
608      affine_map<(d0, d1) -> (d0, d1)>,
609      affine_map<(d0, d1) -> (d0, d1)>,
610      affine_map<(d0, d1) -> (d0, d1)>,
611      affine_map<(d0, d1) -> (d0, d1)>,
612      affine_map<(d0, d1) -> (d0, d1)>],
613    iterator_types = ["parallel", "parallel"]}
614  ins(%arg1, %arg2: memref<4x256xf32>, memref<256xf32>)
615  outs(
616    %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 :
617    memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>,
618    memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>,
619    memref<4x256xf32>, memref<4x256xf32>) {
620  ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32,
621  //       CHECK:   %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32>
622  //       CHECK:   %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : memref<256xf32>, vector<4x256xf32>
623  //       CHECK:   %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32>
624  //       CHECK:   %[[V1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32>
625    %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32,
626    %arg14 : f32):
627  //       CHECK:   %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32>
628    %6 = arith.addf %arg4, %arg6 : f32
629  //       CHECK:   %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32>
630    %7 = arith.cmpf ogt, %arg3, %arg6 : f32
631  //       CHECK:   %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32>
632    %8 = arith.constant 2.0 : f32
633  //       CHECK:   %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32>
634    %9 = arith.divf %arg5, %i : f32
635  //       CHECK:   %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32>
636    %10 = math.exp2 %arg5 : f32
637  //       CHECK:   %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32>
638    %11 = arith.mulf %arg5, %8 : f32
639  //       CHECK:   %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32>
640    %12 = math.rsqrt %arg5 : f32
641  //       CHECK:   %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
642    %13 = arith.select %7, %arg5, %arg6 : f32
643  //       CHECK:   %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32>
644    %14 = arith.subf %arg5, %arg4 : f32
645  //       CHECK:   %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32>
646    %15 = math.tanh %arg5 : f32
647  //       CHECK:   vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
648  //       CHECK:   vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
649  //       CHECK:   vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
650  //       CHECK:   vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
651  //       CHECK:   vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
652  //       CHECK:   vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
653  //       CHECK:   vector.transfer_write %[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
654  //       CHECK:   vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
655  //       CHECK:   vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
656  //       CHECK:   vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32>
657    linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32,
658      f32, f32, f32, f32, f32, f32, f32, f32
659  }
660  return
661}
662
663module attributes {transform.with_named_sequence} {
664  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
665    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
666    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
667    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
668    transform.yield
669  }
670}
671
672// -----
673
674// CHECK-LABEL: func @generic_vectorize_tensor
675//  CHECK-SAME: (%[[ARG0:.*]]: tensor<4x256xf32>, %[[ARG1:.*]]: tensor<4x256xf32>,
676//  CHECK-SAME:  %[[ARG2:.*]]: tensor<256xf32>, %[[ARG3:.*]]: f32)
677func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
678  %arg1: tensor<4x256xf32>, %arg2: tensor<256xf32>,
679  %i: f32) -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
680    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
681    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>) {
682  %c1_f32 = arith.constant 1.0 : f32
683  %r:10 = linalg.generic {
684    indexing_maps = [
685      affine_map<(d0, d1) -> (d0, d1)>,
686      affine_map<(d0, d1) -> (d1)>,
687      affine_map<(d0, d1) -> (d0, d1)>,
688      affine_map<(d0, d1) -> (d0, d1)>,
689      affine_map<(d0, d1) -> (d0, d1)>,
690      affine_map<(d0, d1) -> (d0, d1)>,
691      affine_map<(d0, d1) -> (d0, d1)>,
692      affine_map<(d0, d1) -> (d0, d1)>,
693      affine_map<(d0, d1) -> (d0, d1)>,
694      affine_map<(d0, d1) -> (d0, d1)>,
695      affine_map<(d0, d1) -> (d0, d1)>,
696      affine_map<(d0, d1) -> (d0, d1)>],
697    iterator_types = ["parallel", "parallel"]}
698  ins(%arg1, %arg2: tensor<4x256xf32>, tensor<256xf32>)
699  outs(
700    %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 :
701    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
702    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
703    tensor<4x256xf32>, tensor<4x256xf32>) {
704  ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32,
705    %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32,
706    %arg14 : f32):
707  //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32>
708  //   CHECK-DAG:   %[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32>
709  //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
710  //       CHECK:   %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32>
711  //       CHECK:   %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : tensor<256xf32>, vector<4x256xf32>
712  //       CHECK:   %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32>
713  //       CHECK:   %[[V1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32>
714  //       CHECK:   %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32>
715    %6 = arith.addf %arg4, %arg6 : f32
716  //       CHECK:   %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32>
717    %7 = arith.cmpf ogt, %arg3, %arg6 : f32
718  //       CHECK:   %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32>
719    %8 = arith.constant 2.0 : f32
720  //       CHECK:   %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32>
721    %9 = arith.divf %arg5, %i : f32
722  //       CHECK:   %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32>
723    %10 = math.exp2 %arg5 : f32
724  //       CHECK:   %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32>
725    %11 = arith.mulf %arg5, %8 : f32
726  //       CHECK:   %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32>
727    %12 = math.rsqrt %arg5 : f32
728  //       CHECK:   %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
729    %13 = arith.select %7, %arg5, %arg6 : f32
730  //       CHECK:   %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32>
731    %14 = arith.subf %arg5, %arg4 : f32
732  //       CHECK:   %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32>
733    %15 = math.tanh %arg5 : f32
734  //       CHECK:   %[[R0:.*]] = vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
735  //       CHECK:   %[[R1:.*]] = vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
736  //       CHECK:   %[[R2:.*]] = vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
737  //       CHECK:   %[[R3:.*]] = vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
738  //       CHECK:   %[[R4:.*]] = vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
739  //       CHECK:   %[[R5:.*]] = vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
740  //       CHECK:   %[[R6:.*]] = vector.transfer_write %[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
741  //       CHECK:   %[[R7:.*]] = vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
742  //       CHECK:   %[[R8:.*]] = vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
743  //       CHECK:   %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
744    linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32,
745      f32, f32, f32, f32, f32, f32, f32, f32
746  } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
747    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
748    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>)
749  //       CHECK:   return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
750  return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9:
751    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
752    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
753    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
754}
755
756module attributes {transform.with_named_sequence} {
757  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
758    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
759    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
760    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
761    transform.yield
762  }
763}
764
765// -----
766
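// Broadcasts and transposes on the inputs stay encoded as permutation_maps on
// the transfer_reads (permutation-map lowering is disabled), so no explicit
// vector.broadcast or vector.transpose ops are expected.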
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, 0, 0, d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0, 0, 0, 0)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0) -> (0, 0, d0, 0)>
// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1, 0, d0, 0)>
//     CHECK: func @generic_vectorize_broadcast_transpose
// CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG:   %[[CF:.*]] = arith.constant 0.000000e+00 : f32
//     CHECK:   %[[V0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP0]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
//     CHECK:   %[[V1:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP1]]} : memref<4xf32>, vector<4x4x4x4xf32>
//     CHECK:   %[[V2:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP2]]} : memref<4xf32>, vector<4x4x4x4xf32>
//     CHECK:   %[[V3:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP3]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
//     CHECK:   %[[SUB:.*]] = arith.subf %[[V0]], %[[V1]] : vector<4x4x4x4xf32>
//     CHECK:   %[[ADD0:.*]] = arith.addf %[[V2]], %[[SUB]] : vector<4x4x4x4xf32>
//     CHECK:   %[[ADD1:.*]] = arith.addf %[[V3]], %[[ADD0]] : vector<4x4x4x4xf32>
//     CHECK: vector.transfer_write %[[ADD1]], {{.*}} : vector<4x4x4x4xf32>, memref<4x4x4x4xf32>
func.func @generic_vectorize_broadcast_transpose(
  %A: memref<4xf32>, %B: memref<4x4xf32>, %C: memref<4x4x4x4xf32>) {
  linalg.generic {
  indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3)>,
                   affine_map<(d0, d1, d2, d3) -> (d0)>,
                   affine_map<(d0, d1, d2, d3) -> (d2)>,
                   affine_map<(d0, d1, d2, d3) -> (d2, d0)>,
                   affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
  iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
  ins(%B, %A, %A, %B: memref<4x4xf32>, memref<4xf32>, memref<4xf32>, memref<4x4xf32>)
  outs(%C : memref<4x4x4x4xf32>) {
  ^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32):
    %s = arith.subf %arg0, %arg1 : f32
    %a = arith.addf %arg2, %s : f32
    %b = arith.addf %arg3, %a : f32
    linalg.yield %b : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// Test different input maps.
#matmul_trait = {
  indexing_maps = [
    affine_map<(d0, d1, d2, d3) -> (d1, d0)>,
    affine_map<(d0, d1, d2, d3) -> (d3, d1)>,
    affine_map<(d0, d1, d2, d3) -> (d3, d1, d0, d2)>,
    affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
  ],
  iterator_types = ["parallel", "parallel", "parallel", "parallel"]
}

// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1, d0, 0, 0)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (0, d1, 0, d0)>
// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)>
//       CHECK: func @vectorization_transpose
//       CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP0]]} : memref<14x7xf32>, vector<7x14x8x16xf32>
//       CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP1]]} : memref<16x14xf32>, vector<7x14x8x16xf32>
//       CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP2]]} : memref<16x14x7x8xf32>, vector<7x14x8x16xf32>
//       CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32>
//       CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32>
//       CHECK: vector.transfer_write {{.*}} : vector<7x14x8x16xf32>, memref<7x14x8x16xf32>
func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
                         %C: memref<16x14x7x8xf32>, %D: memref<7x14x8x16xf32>) {
  linalg.generic #matmul_trait
    ins(%A, %B, %C : memref<14x7xf32>, memref<16x14xf32>, memref<16x14x7x8xf32>)
   outs(%D : memref<7x14x8x16xf32>) {
    ^bb(%a: f32, %b: f32, %c: f32, %d: f32) :
      %e = arith.addf %a, %b: f32
      %f = arith.addf %e, %c: f32
      linalg.yield %f : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

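// Tensor-semantics matmul: same multiply + multi_reduction expansion as the
// memref cases, but the final transfer_write yields the returned tensor.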
// CHECK-LABEL: func @matmul_tensors
//  CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>,
//  CHECK-SAME:  %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32>
func.func @matmul_tensors(
  %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>)
    -> tensor<8x12xf32> {
  //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
  //   CHECK-DAG:   %[[V0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x4xf32>, vector<8x12x4xf32>
  //   CHECK-DAG:   %[[V1:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x12xf32>, vector<8x12x4xf32>
  //   CHECK-DAG:   %[[V2:.*]] = vector.transfer_read %[[ARG2]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x12xf32>, vector<8x12xf32>
  //
  // linalg.matmul gets expanded to a 3-D reduction; canonicalization patterns
  // would later convert it to a 2-D vector.contract.
  //       CHECK:   %[[MUL:.*]] = arith.mulf %[[V0]], %[[V1]] : vector<8x12x4xf32>
  //       CHECK:   %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] [2] : vector<8x12x4xf32> to vector<8x12xf32>
  //       CHECK:   %[[W:.*]] = vector.transfer_write %[[R]], %[[ARG2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x12xf32>, tensor<8x12xf32>
  %0 = linalg.matmul  ins(%arg0, %arg1: tensor<8x4xf32>, tensor<4x12xf32>)
                     outs(%arg2: tensor<8x12xf32>)
    -> tensor<8x12xf32>
  //       CHECK:   return %[[W]] : tensor<8x12xf32>
  return %0 : tensor<8x12xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

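// The tensor.pad tests below use the vectorize_padding flag. For static
// shapes a pad becomes: a broadcast of the padding value into the padded
// shape, a transfer_write of that "fill", a (possibly out-of-bounds)
// transfer_read of the source, and a transfer_write at the low-pad offset.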
// CHECK-LABEL: func @pad_static(
//  CHECK-SAME:                  %[[ARG0:.*]]: tensor<2x?x2xf32>, %[[PAD:.*]]: f32
//   CHECK-NOT:   tensor.pad
//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C2:.*]] = arith.constant 2 : index
//   CHECK-DAG:   %[[INIT:.*]] = tensor.empty() : tensor<2x3x4xf32>
//   CHECK-DAG:   %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x3x4xf32>
//       CHECK:   %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]]{{.*}} : vector<2x3x4xf32>, tensor<2x3x4xf32>
//       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, false, true]} : tensor<2x?x2xf32>, vector<2x3x2xf32>
//       CHECK:   %[[RESULT:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x3x2xf32>, tensor<2x3x4xf32>
//       CHECK:   return %[[RESULT]]
func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32> {
  %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index):
      tensor.yield %pad_value : f32
    } : tensor<2x?x2xf32> to tensor<2x3x4xf32>
  return %0 : tensor<2x3x4xf32>
}


module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @pad_static_source(
//  CHECK-SAME:                  %[[ARG0:.*]]: tensor<2x5x2xf32>, %[[PAD:.*]]: f32
//   CHECK-NOT:   tensor.pad
//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C2:.*]] = arith.constant 2 : index
//       CHECK:   %[[INIT:.*]] = tensor.empty() : tensor<2x6x4xf32>
//       CHECK:   %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x6x4xf32>
//       CHECK:   %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<2x6x4xf32>, tensor<2x6x4xf32>
//       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : tensor<2x5x2xf32>, vector<2x5x2xf32>
//       CHECK:   %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x5x2xf32>, tensor<2x6x4xf32>
//       CHECK:   return %[[WRITE]]
func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tensor<2x6x4xf32> {
  %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index):
      tensor.yield %pad_value : f32
    } : tensor<2x5x2xf32> to tensor<2x6x4xf32>
  return %0 : tensor<2x6x4xf32>
}


module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}


// -----

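// With dynamic low/high padding only the result shape is computed in index
// arithmetic; the pad itself lowers to linalg.fill + tensor.insert_slice
// instead of vector transfers.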
// CHECK-LABEL: func @pad_static_dynamic(
//  CHECK-SAME:                          %[[SRC:.*]]: tensor<1x2x2x?xf32>, %[[LOW:.*]]: index, %[[HIGH:.*]]: index
//   CHECK-NOT:   tensor.pad
//   CHECK-DAG:   %[[C2:.*]] = arith.constant 2 : index
//   CHECK-DAG:   %[[C3:.*]] = arith.constant 3 : index
//   CHECK-DAG:   %[[C5:.*]] = arith.constant 5 : index
//       CHECK:   %[[V0:.*]] = arith.addi %[[LOW]], %[[C2]] : index
//       CHECK:   %[[V1:.*]] = arith.addi %[[V0]], %[[C3]] : index
//       CHECK:   %[[V2:.*]] = arith.addi %[[HIGH]], %[[C5]] : index
//       CHECK:   %[[DIM3:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
//       CHECK:   %[[V4:.*]] = arith.addi %[[DIM3]], %[[C3]] : index
//       CHECK:   %[[V5:.*]] = arith.addi %[[V4]], %[[C2]] : index
//       CHECK:   %[[INIT:.*]] = tensor.empty(%[[V1]], %[[V2]], %[[V5]]) : tensor<6x?x?x?xf32>
//       CHECK:   %[[FILL:.*]] = linalg.fill ins(%{{.*}} : f32) outs(%[[INIT]] : tensor<6x?x?x?xf32>) -> tensor<6x?x?x?xf32>
//       CHECK:   %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
//       CHECK:   %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32>
//       CHECK:   return %[[RESULT]]
func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
                  %pad_value: f32) -> tensor<6x?x?x?xf32> {
  %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
      tensor.yield %pad_value : f32
    } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
  return %0 : tensor<6x?x?x?xf32>
}


module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// CHECK-LABEL: func @pad_static_complex(
//   CHECK-NOT:   vector<
func.func @pad_static_complex(%arg0: tensor<2x5x2xcomplex<f32>>, %pad_value: complex<f32>) -> tensor<2x6x4xcomplex<f32>> {
  %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index):
      tensor.yield %pad_value : complex<f32>
    } : tensor<2x5x2xcomplex<f32>> to tensor<2x6x4xcomplex<f32>>
  return %0 : tensor<2x6x4xcomplex<f32>>
}


module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

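// A pad whose result is the destination of a tensor.insert_slice: both the
// pad and the insert_slice are rewritten into vector transfers over the same
// destination tensor.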
func.func private @make_vector() -> tensor<12x13xf32>

// CHECK-LABEL:   func.func @pad_and_insert_slice_dest(
// CHECK-SAME:      %[[ARG_0:.*]]: tensor<1x5x6xf32>) -> tensor<1x12x13xf32> {
// CHECK:           %[[C0:.*]] = arith.constant 0.000000e+00 : f32
// CHECK:           %[[CST:.*]] = arith.constant dense<5.000000e+00> : vector<1x12x13xf32>
// CHECK:           %[[C0_IDX:.*]] = arith.constant 0 : index
// CHECK:           %[[PAD_VAL:.*]] = arith.constant 5.000000e+00 : f32
// CHECK:           %[[EMPTY:.*]] = tensor.empty() : tensor<1x12x13xf32>
// CHECK:           %[[WRITE_1:.*]] = vector.transfer_write %[[CST]], %[[EMPTY]]{{\[}}%[[C0_IDX]], %[[C0_IDX]], %[[C0_IDX]]] {in_bounds = [true, true, true]} : vector<1x12x13xf32>, tensor<1x12x13xf32>
// CHECK:           %[[READ_1:.*]] = vector.transfer_read %[[ARG_0]]{{\[}}%[[C0_IDX]], %[[C0_IDX]], %[[C0_IDX]]], %[[PAD_VAL]] {in_bounds = [true, true, true]} : tensor<1x5x6xf32>, vector<1x5x6xf32>
// CHECK:           %[[WRITE_2:.*]] = vector.transfer_write %[[READ_1]], %[[WRITE_1]]{{\[}}%[[C0_IDX]], %[[C0_IDX]], %[[C0_IDX]]] {in_bounds = [true, true, true]} : vector<1x5x6xf32>, tensor<1x12x13xf32>
// CHECK:           %[[MAKE_VEC:.*]] = call @make_vector() : () -> tensor<12x13xf32>
// CHECK:           %[[READ_2:.*]] = vector.transfer_read %[[MAKE_VEC]]{{\[}}%[[C0_IDX]], %[[C0_IDX]]], %[[C0]] {in_bounds = [true, true]} : tensor<12x13xf32>, vector<12x13xf32>
// CHECK:           %[[RES:.*]] = vector.transfer_write %[[READ_2]], %[[WRITE_2]]{{\[}}%[[C0_IDX]], %[[C0_IDX]], %[[C0_IDX]]] {in_bounds = [true, true]} : vector<12x13xf32>, tensor<1x12x13xf32>
// CHECK:           return %[[RES]] : tensor<1x12x13xf32>
func.func @pad_and_insert_slice_dest(
    %arg0: tensor<1x5x6xf32>) -> tensor<1x12x13xf32> {
  %c5 = arith.constant 5.0 : f32
  %0 = tensor.pad %arg0 low[0, 0, 0] high[0, 7, 7] {
    ^bb0(%arg2: index, %arg3: index, %arg4: index):
      tensor.yield %c5 : f32
  } : tensor<1x5x6xf32> to tensor<1x12x13xf32>
  %1 = call @make_vector() : () -> tensor<12x13xf32>
  %r = tensor.insert_slice %1 into %0[0, 0, 0][1, 12, 13][1, 1, 1] : tensor<12x13xf32> into tensor<1x12x13xf32>
  return %r : tensor<1x12x13xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

1054// CHECK-LABEL: func @pad_tensor_non_const_pad_value
1055//  CHECK-SAME:     %[[ARG0:.*]]: tensor<5x6xf32>
1056//   CHECK-NOT:   tensor.pad
1057//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
1058//   CHECK-DAG:   %[[C3:.*]] = arith.constant 3 : index
1059//   CHECK-DAG:   %[[C4:.*]] = arith.constant 4 : index
1060//       CHECK:   %[[FILL:.*]] = tensor.generate
1061//       CHECK:     %[[RES:.*]] = arith.mulf
1062//       CHECK:     tensor.yield %[[RES]] : f32
1063//       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : tensor<5x6xf32>, vector<5x6xf32>
1064//       CHECK:   %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C3]], %[[C4]]] {in_bounds = [true, true]} : vector<5x6xf32>, tensor<12x13xf32>
1065//       CHECK:   return %[[WRITE]]
1066func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32> {
1067  %c0 = arith.constant 0 : index
1068  %c5 = arith.constant 5.0 : f32
1069  %0 = tensor.pad %arg0 low[3, 4] high[4, 3] {
1070    ^bb0(%arg1: index, %arg2: index):
1071      %i1 = arith.index_cast %arg1 : index to i32
1072      %i2 = arith.index_cast %arg2 : index to i32
1073      %f1 = arith.sitofp %i1 : i32 to f32
1074      %f2 = arith.sitofp %i2 : i32 to f32
1075      %m = arith.mulf %f1, %f2 : f32
1076      tensor.yield %m : f32
1077  } : tensor<5x6xf32> to tensor<12x13xf32>
1078  return %0 : tensor<12x13xf32>
1079}
1080
1081
1082module attributes {transform.with_named_sequence} {
1083  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1084    %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1085    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1086    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
1087    transform.yield
1088  }
1089}
1090
1091// -----
1092
1093// CHECK-LABEL: func @sum_exp
1094func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
1095  -> tensor<4x16xf32>
1096{
1097  // CHECK: vector.transfer_read {{.*}} : tensor<4x16x8xf32>, vector<4x16x8xf32>
1098  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x16xf32>, vector<4x16xf32>
1099  // CHECK: math.exp {{.*}} : vector<4x16x8xf32>
1100  // CHECK: vector.multi_reduction <add>, %{{.*}}, %{{.*}} [2] : vector<4x16x8xf32> to vector<4x16xf32>
1101  // CHECK: vector.transfer_write {{.*}} : vector<4x16xf32>, tensor<4x16xf32>
1102  // CHECK: return {{.*}} : tensor<4x16xf32>
1103  %0 = linalg.generic {
1104      indexing_maps = [
1105        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
1106        affine_map<(d0, d1, d2) -> (d0, d1)>
1107      ],
1108      iterator_types = ["parallel", "parallel", "reduction"]
1109    } ins(%input : tensor<4x16x8xf32>) outs(%output : tensor<4x16xf32>) {
1110    ^bb0(%arg0: f32, %arg1: f32):
1111      %1 = math.exp %arg0 : f32
1112      %2 = arith.addf %1, %arg1 : f32
1113      linalg.yield %2 : f32
1114    } -> tensor<4x16xf32>
1115  return %0 : tensor<4x16xf32>
1116}
1117
1118
1119module attributes {transform.with_named_sequence} {
1120  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1121    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1122    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1123    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1124    transform.yield
1125  }
1126}
1127
1128// -----
1129
1130// CHECK-DAG: #[[$M1:.*]] =  affine_map<(d0, d1) -> (d1, d0, 0, 0)>
1131// CHECK-DAG: #[[$M2:.*]] =  affine_map<(d0, d1) -> (0, 0, d1, d0)>
1132// CHECK-DAG: #[[$M3:.*]] =  affine_map<(d0, d1) -> (d1, d0)>
1133
1134// CHECK-LABEL: func @sum_exp_2
1135func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>)
1136  -> tensor<5x2xf32>
1137{
1138  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32>
1139  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M2]]} : tensor<5x4xf32>, vector<2x3x4x5xf32>
1140  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : tensor<5x2xf32>, vector<2x5xf32>
1141  // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
1142  // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
1143  // CHECK: addf {{.*}} : vector<2x3x4x5xf32>
1144  // CHECK: vector.multi_reduction <add>, {{.*}}, %{{.*}}  [1, 2] : vector<2x3x4x5xf32> to vector<2x5xf32>
1145  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : vector<2x5xf32>, tensor<5x2xf32>
1146  // CHECK: return {{.*}} : tensor<5x2xf32>
1147  %0 = linalg.generic {
1148      indexing_maps = [
1149        affine_map<(d0, d1, d2, d3) -> (d1, d0)>,
1150        affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
1151        affine_map<(d0, d1, d2, d3) -> (d3, d0)>
1152      ],
1153      iterator_types = ["parallel", "reduction", "reduction", "parallel"]
1154    } ins(%input, %input_2 : tensor<3x2xf32>, tensor<5x4xf32>) outs(%output : tensor<5x2xf32>) {
1155    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):
1156      %1 = math.exp %arg0 : f32
1157      %2 = math.exp %arg1 : f32
1158      %3 = arith.addf %1, %2 : f32
1159      %4 = arith.addf %3, %arg2 : f32
1160      linalg.yield %4 : f32
1161    } -> tensor<5x2xf32>
1162  return %0 : tensor<5x2xf32>
1163}
1164
1165
1166module attributes {transform.with_named_sequence} {
1167  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1168    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1169    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1170    %5 = transform.structured.vectorize_children_and_apply_patterns %4  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
1171    transform.yield
1172  }
1173}
1174
1175// -----
1176
1177// CHECK-LABEL:   func @red_maximumf_2d(
1178func.func @red_maximumf_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
1179  // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32>
1180  // CHECK: tensor.empty() : tensor<4xf32>
1181  // CHECK: vector.multi_reduction <maximumf>, {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32>
1182  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
1183  %ident = arith.constant -3.40282e+38 : f32
1184  %init = tensor.empty() : tensor<4xf32>
1185  %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1186  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1187                                          affine_map<(d0, d1) -> (d0)>],
1188                         iterator_types = ["parallel", "reduction"]}
1189                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
1190  ^bb0(%in0: f32, %out0: f32):
1191    %max = arith.maximumf %in0, %out0 : f32
1192    linalg.yield %max : f32
1193  } -> tensor<4xf32>
1194  return %red : tensor<4xf32>
1195}
1196
1197
1198module attributes {transform.with_named_sequence} {
1199  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1200    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1201    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1202    %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
1203    transform.yield
1204  }
1205}
1206
1207// -----
1208
1209// CHECK-LABEL:   func @red_maxnumf_2d(
1210func.func @red_maxnumf_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
1211  // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32>
1212  // CHECK: tensor.empty() : tensor<4xf32>
1213  // CHECK: vector.multi_reduction <maxnumf>, {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32>
1214  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
1215  %ident = arith.constant -3.40282e+38 : f32
1216  %init = tensor.empty() : tensor<4xf32>
1217  %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1218  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1219                                          affine_map<(d0, d1) -> (d0)>],
1220                         iterator_types = ["parallel", "reduction"]}
1221                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
1222  ^bb0(%in0: f32, %out0: f32):
1223    %max = arith.maxnumf %in0, %out0 : f32
1224    linalg.yield %max : f32
1225  } -> tensor<4xf32>
1226  return %red : tensor<4xf32>
1227}
1228
1229
1230module attributes {transform.with_named_sequence} {
1231  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1232    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1233    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1234    %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
1235    transform.yield
1236  }
1237}
1238
1239// -----
1240
1241// CHECK-LABEL:   func @red_minimumf_2d(
1242func.func @red_minimumf_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
1243  // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32>
1244  // CHECK: tensor.empty() : tensor<4xf32>
1245  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
1246  // CHECK: vector.multi_reduction <minimumf>, {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32>
1247  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
1248  %maxf32 = arith.constant 3.40282e+38 : f32
1249  %init = tensor.empty() : tensor<4xf32>
1250  %fill = linalg.fill ins(%maxf32 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1251  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1252                                          affine_map<(d0, d1) -> (d0)>],
1253                         iterator_types = ["parallel", "reduction"]}
1254                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
1255  ^bb0(%in0: f32, %out0: f32):
1256    %min = arith.minimumf %out0, %in0 : f32
1257    linalg.yield %min : f32
1258  } -> tensor<4xf32>
1259  return %red : tensor<4xf32>
1260}
1261
1262
1263module attributes {transform.with_named_sequence} {
1264  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1265    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1266    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1267    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1268    transform.yield
1269  }
1270}
1271
1272// -----
1273
1274// CHECK-LABEL:   func @red_minnumf_2d(
1275func.func @red_minnumf_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
1276  // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32>
1277  // CHECK: tensor.empty() : tensor<4xf32>
1278  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
1279  // CHECK: vector.multi_reduction <minnumf>, {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32>
1280  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
1281  %maxf32 = arith.constant 3.40282e+38 : f32
1282  %init = tensor.empty() : tensor<4xf32>
1283  %fill = linalg.fill ins(%maxf32 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1284  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1285                                          affine_map<(d0, d1) -> (d0)>],
1286                         iterator_types = ["parallel", "reduction"]}
1287                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
1288  ^bb0(%in0: f32, %out0: f32):
1289    %min = arith.minnumf %out0, %in0 : f32
1290    linalg.yield %min : f32
1291  } -> tensor<4xf32>
1292  return %red : tensor<4xf32>
1293}
1294
1295
1296module attributes {transform.with_named_sequence} {
1297  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1298    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1299    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1300    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1301    transform.yield
1302  }
1303}
1304
1305// -----
1306
1307// CHECK-LABEL:   func @red_mul_2d(
1308func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
1309  // CHECK: tensor.empty() : tensor<4xf32>
1310  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
1311  // CHECK: vector.multi_reduction <mul>, {{.*}}, {{.*}} [1] : vector<4x4xf32> to vector<4xf32>
1312  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
1313  %ident = arith.constant 1.0 : f32
1314  %init = tensor.empty() : tensor<4xf32>
1315  %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1316  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1317                                          affine_map<(d0, d1) -> (d0)>],
1318                         iterator_types = ["parallel", "reduction"]}
1319                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
1320  ^bb0(%in0: f32, %out0: f32):
1321    %mul = arith.mulf %in0, %out0 : f32
1322    linalg.yield %mul : f32
1323  } -> tensor<4xf32>
1324  return %red : tensor<4xf32>
1325}
1326
1327
1328module attributes {transform.with_named_sequence} {
1329  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1330    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1331    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1332    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1333    transform.yield
1334  }
1335}
1336
1337// -----
1338
1339// CHECK-LABEL:   func @red_or_2d(
1340func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
1341  // CHECK: tensor.empty() : tensor<4xi1>
1342  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
1343  // CHECK: vector.multi_reduction <or>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
1344  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
1345  %ident = arith.constant false
1346  %init = tensor.empty() : tensor<4xi1>
1347  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
1348  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1349                                          affine_map<(d0, d1) -> (d0)>],
1350                         iterator_types = ["parallel", "reduction"]}
1351                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
1352  ^bb0(%in0: i1, %out0: i1):
1353    %or = arith.ori %in0, %out0 : i1
1354    linalg.yield %or : i1
1355  } -> tensor<4xi1>
1356  return %red : tensor<4xi1>
1357}
1358
1359
1360module attributes {transform.with_named_sequence} {
1361  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1362    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1363    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1364    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1365    transform.yield
1366  }
1367}
1368
1369// -----
1370
1371// CHECK-LABEL:   func @red_and_2d(
1372func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
1373  // CHECK: tensor.empty() : tensor<4xi1>
1374  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
1375  // CHECK: vector.multi_reduction <and>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
1376  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
1377  %ident = arith.constant true
1378  %init = tensor.empty() : tensor<4xi1>
1379  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
1380  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1381                                          affine_map<(d0, d1) -> (d0)>],
1382                         iterator_types = ["parallel", "reduction"]}
1383                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
1384  ^bb0(%in0: i1, %out0: i1):
1385    %and = arith.andi %in0, %out0 : i1
1386    linalg.yield %and : i1
1387  } -> tensor<4xi1>
1388  return %red : tensor<4xi1>
1389}
1390
1391
1392module attributes {transform.with_named_sequence} {
1393  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1394    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1395    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1396    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1397    transform.yield
1398  }
1399}
1400
1401// -----
1402
1403// CHECK-LABEL:   func @red_xor_2d(
1404func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
1405  // CHECK: tensor.empty() : tensor<4xi1>
1406  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
1407  // CHECK: vector.multi_reduction <xor>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
1408  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
1409  %ident = arith.constant false
1410  %init = tensor.empty() : tensor<4xi1>
1411  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
1412  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1413                                          affine_map<(d0, d1) -> (d0)>],
1414                         iterator_types = ["parallel", "reduction"]}
1415                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
1416  ^bb0(%in0: i1, %out0: i1):
1417    %xor = arith.xori %in0, %out0 : i1
1418    linalg.yield %xor : i1
1419  } -> tensor<4xi1>
1420  return %red : tensor<4xi1>
1421}
1422
1423
1424module attributes {transform.with_named_sequence} {
1425  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1426    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1427    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1428    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1429    transform.yield
1430  }
1431}
1432
1433// -----
1434
1435// CHECK-DAG: #[[$M5:.*]] = affine_map<(d0, d1) -> (d0, 0)>
1436
1437// CHECK-LABEL:   func @explicit_broadcast(
1438func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> {
1439  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
1440  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M5]]} : tensor<4x1xf32>, vector<4x4xf32>
1441  // CHECK: subf {{.*}} : vector<4x4xf32>
1442  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<4x4xf32>, tensor<4x4xf32>
1443  %c0 = arith.constant 0.0 : f32
1444  %init = tensor.empty() : tensor<4x4xf32>
1445  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4x4xf32>) -> tensor<4x4xf32>
1446  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1447                                          affine_map<(d0, d1) -> (d0, 0)>,
1448                                          affine_map<(d0, d1) -> (d0, d1)>],
1449   iterator_types = ["parallel", "parallel"]}
1450   ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>)
1451   outs(%fill : tensor<4x4xf32>) {
1452    ^bb0(%arg7: f32, %arg8: f32, %arg9: f32):
1453      %40 = arith.subf %arg7, %arg8 : f32
1454      linalg.yield %40 : f32
1455    } -> tensor<4x4xf32>
1456  return %red : tensor<4x4xf32>
1457}
1458
1459
1460module attributes {transform.with_named_sequence} {
1461  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1462    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1463    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1464    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1465    transform.yield
1466  }
1467}
1468
1469// -----
1470
1471// CHECK-DAG: #[[$M6:.*]] = affine_map<(d0, d1) -> (d0, 0)>
1472
1473// CHECK-LABEL:   func @fused_broadcast_red_2d
1474func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4xf32> {
1475  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
1476  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M6]]} : tensor<4x1xf32>, vector<4x4xf32>
1477  // CHECK: subf {{.*}} : vector<4x4xf32>
1478  // CHECK: math.exp {{.*}} : vector<4x4xf32>
1479  // CHECK: vector.multi_reduction <add>, {{.*}}, {{.*}} : vector<4x4xf32> to vector<4xf32>
1480  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<4xf32>
1481  %c0 = arith.constant 0.0 : f32
1482  %init = tensor.empty() : tensor<4xf32>
1483  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
1484  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
1485                                          affine_map<(d0, d1) -> (d0, 0)>,
1486                                          affine_map<(d0, d1) -> (d0)>],
1487   iterator_types = ["parallel", "reduction"]}
1488   ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>)
1489   outs(%fill : tensor<4xf32>) {
1490    ^bb0(%arg7: f32, %arg8: f32, %arg9: f32):
1491      %40 = arith.subf %arg7, %arg8 : f32
1492      %41 = math.exp %40 : f32
1493      %42 = arith.addf %41, %arg9 : f32
1494      linalg.yield %42 : f32
1495    } -> tensor<4xf32>
1496  return %red : tensor<4xf32>
1497}
1498
1499
1500module attributes {transform.with_named_sequence} {
1501  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1502    %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1503    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1504    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1505
1506    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1507    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1508    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1509    transform.yield
1510  }
1511}
1512
1513// -----
1514
1515//  CHECK-LABEL: func @reduce_1d(
1516//   CHECK-SAME:   %[[A:.*]]: tensor<32xf32>
1517func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
1518  //  CHECK-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f32
1519  //  CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
1520  %f0 = arith.constant 0.000000e+00 : f32
1521
1522  //      CHECK: %[[init:.*]] = tensor.empty() : tensor<f32>
1523  %0 = tensor.empty() : tensor<f32>
1524
1525  %1 = linalg.fill ins(%f0 : f32) outs(%0 : tensor<f32>) -> tensor<f32>
1526  //      CHECK: %[[r:.*]] = vector.transfer_read %[[A]][%[[C0]]]
1527  // CHECK-SAME:   : tensor<32xf32>, vector<32xf32>
1528  //      CHECK: %[[red:.*]] = vector.multi_reduction <add>, %[[r]], %[[F0]] [0]
1529  // CHECK-SAME:   : vector<32xf32> to f32
1530  //      CHECK: %[[red_v1:.*]] = vector.broadcast %[[red]] : f32 to vector<f32>
1531  //      CHECK: %[[res:.*]] = vector.transfer_write %[[red_v1]], %[[init]][]
1532  // CHECK-SAME:   : vector<f32>, tensor<f32>
1533  %2 = linalg.generic {
1534         indexing_maps = [affine_map<(d0) -> (d0)>,
1535                          affine_map<(d0) -> ()>],
1536         iterator_types = ["reduction"]}
1537         ins(%arg0 : tensor<32xf32>)
1538         outs(%1 : tensor<f32>) {
1539    ^bb0(%a: f32, %b: f32):
1540      %3 = arith.addf %a, %b : f32
1541      linalg.yield %3 : f32
1542    } -> tensor<f32>
1543
1544  return %2 : tensor<f32>
1545}
1546
1547module attributes {transform.with_named_sequence} {
1548  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1549    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1550    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1551    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1552    transform.yield
1553  }
1554}
1555
1556
1557// -----
1558
1559// This test checks that vectorization does not occur when an input indexing map
1560// is not a projected permutation. In the future, this can be converted to a
1561// positive test when support is added.
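// (The first indexing map below, (d0, d1, d2, d3) -> (d0 + d2, d1 + d3),
// contains sums of dimensions rather than plain dimensions, so it is not a
// projected permutation.)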
1562
1563// CHECK-LABEL:   func @not_projected_permutation
1564func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf32> {
1565  %c0 = arith.constant 0.0 : f32
1566  %init = tensor.empty() : tensor<6x6x3x3xf32>
1567  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<6x6x3x3xf32>) -> tensor<6x6x3x3xf32>
1568  // CHECK: linalg.generic
1569  %result = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0 + d2, d1 + d3)>,
1570                                             affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
1571   iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
1572   ins(%arg0 : tensor<8x8xf32>)
1573   outs(%fill : tensor<6x6x3x3xf32>) {
1574    ^bb0(%arg7: f32, %arg9: f32):
1575      linalg.yield %arg7 : f32
1576    } -> tensor<6x6x3x3xf32>
1577  return %result : tensor<6x6x3x3xf32>
1578}
1579
1580module attributes {transform.with_named_sequence} {
1581  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1582    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1583    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1584    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1585    transform.yield
1586  }
1587}
1588
1589// -----
1590
// Check that vectorization can handle cases where the outputs are a mix of reduced and non-reduced values.
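// Below, the elementwise product %1 is yielded directly into the parallel
// 3-D result, while %2 accumulates it along the reduction dimension d2 into
// the 2-D result.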
1592func.func @mixed_parallel_reduced_results(%arg0 : tensor<2x4x8xf32>,
1593    %arg1 : tensor<2x4xf32>, %arg2 : tensor<2x4x8xf32>, %arg3 : tensor<2x4xf32>) ->
1594    (tensor<2x4x8xf32>, tensor<2x4xf32>) {
1595  %0:2 = linalg.generic {
1596      indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>,
1597                       affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
1598      iterator_types = ["parallel", "parallel", "reduction"]}
1599      ins(%arg0, %arg1 : tensor<2x4x8xf32>, tensor<2x4xf32>)
1600      outs(%arg2, %arg3 : tensor<2x4x8xf32>, tensor<2x4xf32>) {
1601    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32, %b3 : f32):
1602      %1 = arith.mulf %b0, %b1 : f32
1603      %2 = arith.addf %1, %b3 : f32
1604      linalg.yield %1, %2 : f32, f32
1605  } -> (tensor<2x4x8xf32>, tensor<2x4xf32>)
1606  return %0#0, %0#1 : tensor<2x4x8xf32>, tensor<2x4xf32>
1607}
1608// CHECK-LABEL: func @mixed_parallel_reduced_results(
1609//  CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<2x4x8xf32>
1610//  CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: tensor<2x4xf32>
1611//  CHECK-SAME:     %[[ARG2:[a-zA-Z0-9]+]]: tensor<2x4x8xf32>
1612//  CHECK-SAME:     %[[ARG3:[a-zA-Z0-9]+]]: tensor<2x4xf32>
1613//   CHECK-DAG:   %[[V0:.+]] = vector.transfer_read %[[ARG0]]
1614//   CHECK-DAG:   %[[V1:.+]] = vector.transfer_read %[[ARG1]]
1615//   CHECK-DAG:   %[[V2:.+]] = vector.transfer_read %[[ARG3]]
1616//   CHECK-DAG:   %[[MUL:.+]] = arith.mulf %[[V0]], %[[V1]]
1617//   CHECK-DAG:   %[[ADD:.+]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]]
1618//   CHECK-DAG:   vector.transfer_write %[[MUL]], %[[ARG2]]
1619//   CHECK-DAG:   vector.transfer_write %[[ADD]], %[[ARG3]]
1620
1621module attributes {transform.with_named_sequence} {
1622  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1623    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1624    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1625    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
1626    transform.yield
1627  }
1628}
1629
1630// -----
1631
1632func.func @vectorize_map(%arg0: memref<64xf32>,
1633    %arg1: memref<64xf32>, %arg2: memref<64xf32>) {
1634  linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>)
1635             outs(%arg2 : memref<64xf32>)
1636    (%in: f32, %in_0: f32) {
1637      %0 = arith.addf %in, %in_0 : f32
1638      linalg.yield %0 : f32
1639    }
1640  return
1641}
1642// CHECK-LABEL: func @vectorize_map
1643// CHECK:         %[[LHS:.*]] = vector.transfer_read
1644// CHECK-NEXT:    %[[RHS:.*]] = vector.transfer_read
1645// CHECK-NEXT:    arith.addf %[[LHS]], %[[RHS]] : vector<64xf32>
1646
1647module attributes {transform.with_named_sequence} {
1648  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1649    %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1650    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1651    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1652    transform.yield
1653  }
1654}
1655
1656// -----
1657
1658func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>,
1659                               %arg1: memref<32x64x16xf32>) {
1660  linalg.transpose ins(%arg0 : memref<16x32x64xf32>)
1661                   outs(%arg1 : memref<32x64x16xf32>) permutation = [1, 2, 0]
1662  return
1663}
1664// CHECK-LABEL: func @vectorize_transpose
1665// CHECK:         vector.transpose
1666// CHECK-SAME:      [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32>
1667
1668module attributes {transform.with_named_sequence} {
1669  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1670    %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1671    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1672    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1673    transform.yield
1674  }
1675}
1676
1677// -----
1678
1679func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>,
1680                  %arg1: memref<16x64xf32>) {
1681  linalg.reduce ins(%arg0 : memref<16x32x64xf32>)
1682                outs(%arg1 : memref<16x64xf32>) dimensions = [1]
1683    (%in: f32, %init: f32) {
1684      %0 = arith.addf %in, %init : f32
1685      linalg.yield %0 : f32
1686    }
1687  return
1688}
1689// CHECK-LABEL: func @vectorize_reduce
1690// CHECK:         vector.multi_reduction <add>
1691// CHECK-SAME:    : vector<16x32x64xf32> to vector<16x64xf32>
1692
1693module attributes {transform.with_named_sequence} {
1694  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1695    %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1696    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1697    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1698    transform.yield
1699  }
1700}
1701
1702// -----
1703
// This is a regression test. This IR cannot be vectorized, but
// transform.structured.vectorize_children_and_apply_patterns should nevertheless succeed.
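// (The linalg.generic below operates on dynamically shaped tensor<?xf32>
// slices, so fixed vector sizes cannot be inferred for it.)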
1706
1707#map = affine_map<(d0) -> (d0)>
1708// CHECK-LABEL:   @not_vectorizable
1709func.func @not_vectorizable(%arg0: tensor<1x?xf32>, %arg1: index, %arg2: index, %arg3: index) -> tensor<1x128xf32> {
1710  %c0 = arith.constant 0 : index
1711  %0 = tensor.empty() : tensor<1x128xf32>
1712  %1 = scf.for %arg5 = %arg2 to %arg1 step %arg3 iter_args(%arg6 = %0) -> (tensor<1x128xf32>) {
1713    %extracted_slice = tensor.extract_slice %arg6[0, 0] [1, %arg1] [1, 1] : tensor<1x128xf32> to tensor<?xf32>
1714    %sz0 = tensor.dim %extracted_slice, %c0 : tensor<?xf32>
1715    %expanded = tensor.expand_shape %extracted_slice [[0, 1]] output_shape [1, %sz0] : tensor<?xf32> into tensor<1x?xf32>
1716    %extracted_slice_0 = tensor.extract_slice %arg0[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32>
1717    %extracted_slice_1 = tensor.extract_slice %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32>
1718    %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%extracted_slice_0 : tensor<?xf32>) outs(%extracted_slice_1 : tensor<?xf32>) {
1719    ^bb0(%in: f32, %out: f32):
1720      %3 = arith.addf %in, %out : f32
1721      linalg.yield %3 : f32
1722    } -> tensor<?xf32>
1723    %inserted_slice = tensor.insert_slice %2 into %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<?xf32> into tensor<1x?xf32>
1724    %collapsed = tensor.collapse_shape %inserted_slice [[0, 1]] : tensor<1x?xf32> into tensor<?xf32>
1725    %inserted_slice_2 = tensor.insert_slice %collapsed into %arg6[0, 0] [1, %arg1] [1, 1] : tensor<?xf32> into tensor<1x128xf32>
1726    scf.yield %inserted_slice_2 : tensor<1x128xf32>
1727  }
1728  return %1 : tensor<1x128xf32>
1729}
1730module attributes {transform.with_named_sequence} {
1731  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
1732    %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
1733    %1 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
1734    transform.yield
1735  }
1736}
1737
1738// -----
1739
1740// Regression test: %13 was incorrectly detected as a reduction and
1741// vectorization failed.
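// (%13 merely adds the loop induction variable to linalg.index - a pure index
// computation, not a reduction of the output value.)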
1742
1743func.func @wrong_reduction_detection(%input: tensor<120x64xf32>) -> tensor<120x64xf32> {
1744  %c0 = arith.constant 0 : index
1745  %c4 = arith.constant 4 : index
1746  %c64 = arith.constant 64 : index
1747  %cst_6 = arith.constant 4.000000e+00 : f32
1748  %1 = scf.for %arg0 = %c0 to %c64 step %c4 iter_args(%arg1 = %input) -> (tensor<120x64xf32>) {
1749    %extracted_slice = tensor.extract_slice %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<120x64xf32> to tensor<1x4xf32>
1750    %10 = linalg.fill {__internal_linalg_transform__ = "1"} ins(%cst_6 : f32) outs(%extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32>
1751    %11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} outs(%10 : tensor<1x4xf32>) {
1752    ^bb0(%out: f32):
1753      %12 = linalg.index 0 : index
1754      %13 = arith.addi %arg0, %12 : index
1755      %18 = arith.index_cast %13 : index to i32
1756      %20 = arith.uitofp %18 : i32 to f32
1757      %67 = arith.mulf %out, %20 : f32
1758      linalg.yield %67 : f32
1759    } -> tensor<1x4xf32>
1760    %inserted_slice = tensor.insert_slice %11 into %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<1x4xf32> into tensor<120x64xf32>
1761    scf.yield %inserted_slice : tensor<120x64xf32>
1762  }
1763  return %1 : tensor<120x64xf32>
1764}
1765
1766module attributes {transform.with_named_sequence} {
1767  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1768    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1769    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1770    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1771    transform.yield
1772  }
1773}
1774
1775// CHECK-LABEL: @wrong_reduction_detection
1776// CHECK:         vector.broadcast
1777// CHECK:         vector.transfer_write
1778
1779// -----
1780
// Don't vectorize ops operating on zero-sized tensors such as tensor<0xf32>.
1782// CHECK-LABEL: @tensor_size0
1783// CHECK:         linalg.generic
1784func.func @tensor_size0(%arg0: tensor<0xf32>,
1785                        %arg1: tensor<f32>) -> tensor<f32> {
1786  %0 = linalg.generic
1787  {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>],
1788  iterator_types = ["reduction"]}
1789  ins(%arg0 : tensor<0xf32>) outs(%arg1 : tensor<f32>) {
1790    ^bb0(%in: f32, %out: f32):
1791    %12 = arith.addf %out, %in : f32
1792    linalg.yield %12 : f32
1793  } -> tensor<f32>
1794  return %0 : tensor<f32>
1795}
1796
1797module attributes {transform.with_named_sequence} {
1798  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1799    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1800    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1801    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1802    transform.yield
1803  }
1804}
1805
1806// -----
1807
1808// CHECK-LABEL: func @test_masked_pad_static_dynamic
1809func.func @test_masked_pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
1810                  %pad_value: f32) -> tensor<6x?x?x?xf32> {
1811  // CHECK: tensor.pad
1812  %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
1813    ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
1814      tensor.yield %pad_value : f32
1815    } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
1816  return %0 : tensor<6x?x?x?xf32>
1817}
1818
1819
1820module attributes {transform.with_named_sequence} {
1821  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1822    %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1823    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1824    %2 = transform.structured.vectorize_children_and_apply_patterns %1  { vectorize_padding } : (!transform.any_op) -> !transform.any_op
1825    transform.yield
1826  }
1827}
1828
1829// -----
1830
1831func.func @zero_dim_tensor(%input: tensor<f32>, %output: tensor<f32>) -> tensor<f32>
1832{
1833  %0 = linalg.generic { indexing_maps = [ affine_map<() -> ()>, affine_map<() -> ()> ],
1834                        iterator_types = [] }
1835                        ins(%input : tensor<f32>)
1836                        outs(%output : tensor<f32>) {
1837    ^bb0(%arg0: f32, %arg1: f32):
1838      %2 = arith.addf %arg0, %arg1 : f32
1839      linalg.yield %2 : f32
1840    } -> tensor<f32>
1841  return %0 : tensor<f32>
1842}
1843
1844module attributes {transform.with_named_sequence} {
1845  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1846    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1847    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1848    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1849    transform.yield
1850  }
1851}
1852
1853// CHECK-LABEL: func @zero_dim_tensor
1854//       CHECK:     vector.transfer_read {{.*}} : tensor<f32>, vector<f32>
1855//       CHECK:     vector.extract
1856//       CHECK:     vector.transfer_read {{.*}} : tensor<f32>, vector<f32>
1857//       CHECK:     vector.extract
1858//       CHECK:     arith.addf {{.*}} : f32
1859//       CHECK:     vector.broadcast %{{.*}} : f32 to vector<f32>
1860//       CHECK:     vector.transfer_write {{.*}} : vector<f32>, tensor<f32>
1861
1862// -----
1863
1864// Make sure we generate the right transfer writes for multi-output generic ops
1865// with different permutation maps.
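// As the CHECK lines below show, the computed vector<4x1xf32> value is
// transposed once for the tensor<1x4xf32> result and transposed back for the
// tensor<4x1xf32> result.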
1866
1867func.func @multi_output_generic_different_perm_maps(%in0: tensor<4x1xf32>,
1868                                                    %out0: tensor<4x1xf32>,
1869                                                    %out1: tensor<1x4xf32>) -> (tensor<4x1xf32>, tensor<1x4xf32>) {
1870  %13:2 = linalg.generic {indexing_maps = [ affine_map<(d0, d1) -> (d1, d0)>,
1871                                            affine_map<(d0, d1) -> (d1, d0)>,
1872                                            affine_map<(d0, d1) -> (d0, d1)> ],
1873                          iterator_types = ["parallel", "parallel"]}
1874                          ins(%in0 : tensor<4x1xf32>)
1875                          outs(%out0, %out1 : tensor<4x1xf32>, tensor<1x4xf32>) {
1876  ^bb0(%in: f32, %out: f32, %out_2: f32):
1877    %16 = arith.addf %in, %in : f32
1878    linalg.yield %16, %16 : f32, f32
1879  } -> (tensor<4x1xf32>, tensor<1x4xf32>)
1880  return %13#0, %13#1 : tensor<4x1xf32>, tensor<1x4xf32>
1881}
1882
1883module attributes {transform.with_named_sequence} {
1884  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
1885    %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
1886    %4 = transform.get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1887    %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op
1888    transform.yield
1889  }
1890}
1891
1892// CHECK-LABEL: func @multi_output_generic_different_perm_maps
1893//       CHECK:     %[[VAL_5:.*]] = vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<4x1xf32>, vector<4x1xf32>
1894//       CHECK:     %[[VAL_6:.*]] = arith.addf %[[VAL_5]], %[[VAL_5]] : vector<4x1xf32>
1895//       CHECK:     %[[VAL_7:.*]] = vector.transpose %[[VAL_6]], [1, 0] : vector<4x1xf32> to vector<1x4xf32>
1896//       CHECK:     %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 0] : vector<1x4xf32> to vector<4x1xf32>
1897//       CHECK:     vector.transfer_write %[[VAL_8]], %{{.*}} {in_bounds = [true, true]} : vector<4x1xf32>, tensor<4x1xf32>
1898//       CHECK:     vector.transfer_write %[[VAL_7]], %{{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x4xf32>
1899
1900// -----
1901
1902// Extracted from: https://github.com/llvm/llvm-project/issues/97247
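// The reduction along d3 is written back through the broadcast-like output
// map #map1, which pins the trailing result dimension to 0.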
1903
1904#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
1905#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, 0)>
1906
1907func.func @generic_with_reduction_and_broadcast(%arg0: tensor<1x12x197x197xf32>) -> (tensor<1x12x197x1xf32>) {
1908  %0 = tensor.empty() : tensor<1x12x197x1xf32>
1909  %1 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "reduction"]} ins(%arg0 : tensor<1x12x197x197xf32>) outs(%0 : tensor<1x12x197x1xf32>) {
1910  ^bb0(%in: f32, %out: f32):
1911    %818 = arith.addf %in, %out : f32
1912    linalg.yield %818 : f32
1913  } -> tensor<1x12x197x1xf32>
1914  return %1 : tensor<1x12x197x1xf32>
1915}
1916module attributes {transform.with_named_sequence} {
1917  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
1918    %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
1919    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1920    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1921    transform.yield
1922  }
1923}
1924
1925// CHECK: #[[$ATTR_32:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
1926
1927// CHECK-LABEL:   func.func @generic_with_reduction_and_broadcast(
1928// CHECK-SAME:                                                    %[[VAL_0:.*]]: tensor<1x12x197x197xf32>) -> tensor<1x12x197x1xf32> {
1929// CHECK:           %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f32
1930// CHECK:           %[[VAL_2:.*]] = arith.constant 0 : index
1931// CHECK:           %[[VAL_3:.*]] = tensor.empty() : tensor<1x12x197x1xf32>
1932// CHECK:           %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_2]], %[[VAL_2]], %[[VAL_2]], %[[VAL_2]]], %[[VAL_1]] {in_bounds = [true, true, true, true]} : tensor<1x12x197x197xf32>, vector<1x12x197x197xf32>
1933// CHECK:           %[[VAL_5:.*]] = vector.transfer_read %[[VAL_3]]{{\[}}%[[VAL_2]], %[[VAL_2]], %[[VAL_2]], %[[VAL_2]]], %[[VAL_1]] {in_bounds = [true, true, true], permutation_map = #[[$ATTR_32]]} : tensor<1x12x197x1xf32>, vector<1x12x197xf32>
1934// CHECK:           %[[VAL_6:.*]] = vector.multi_reduction <add>, %[[VAL_4]], %[[VAL_5]] [3] : vector<1x12x197x197xf32> to vector<1x12x197xf32>
1935// CHECK:           %[[VAL_7:.*]] = vector.broadcast %[[VAL_6]] : vector<1x12x197xf32> to vector<1x1x12x197xf32>
1936// CHECK:           %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 2, 3, 0] : vector<1x1x12x197xf32> to vector<1x12x197x1xf32>
1937// CHECK:           %[[VAL_9:.*]] = vector.transfer_write %[[VAL_8]], %[[VAL_3]]{{\[}}%[[VAL_2]], %[[VAL_2]], %[[VAL_2]], %[[VAL_2]]] {in_bounds = [true, true, true, true]} : vector<1x12x197x1xf32>, tensor<1x12x197x1xf32>
1938// CHECK:           return %[[VAL_9]] : tensor<1x12x197x1xf32>
1939
1940// -----
1941
// The input is identical to the test in vectorization.mlir, but the output is
// different - here the vector sizes are inferred (rather than user-specified)
// and hence _no_ masking is used.
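// All transfer ops below consequently use static vector shapes taken straight
// from the tensor types (e.g. vector<32x8x16xf32>), with no vector.mask ops.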
1945
1946func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
1947  %pack = tensor.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
1948  return %pack : tensor<4x1x32x16x2xf32>
1949}
1950
1951module attributes {transform.with_named_sequence} {
1952  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
1953    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
1954    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
1955    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
1956    transform.yield
1957  }
1958}
1959
1960// CHECK-LABEL:   func.func @test_vectorize_pack(
1961// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x8x16xf32>,
1962// CHECK-SAME:      %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
1963// CHECK:           %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
1964// CHECK:           %[[VAL_3:.*]] = arith.constant 0 : index
1965// CHECK:           %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
1966// CHECK:           %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
1967// CHECK:           %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
1968// CHECK:           %[[VAL_7:.*]] = tensor.empty() : tensor<4x1x32x16x2xf32>
1969// CHECK:           %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<4x1x32x16x2xf32>, tensor<4x1x32x16x2xf32>
1970// CHECK:           return %[[VAL_8]] : tensor<4x1x32x16x2xf32>
1971
1972// -----
1973
// The input is identical to the test in vectorization.mlir, but the output is
// different - here the vector sizes are inferred (rather than user-specified)
// and hence _no_ masking is used.
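// Because the source (32x7x15) is smaller than the inferred read shape
// (32x8x16), the transfer_read below is marked in_bounds = [true, false, false]
// and the padding value fills the out-of-bounds lanes.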
1977
1978func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
1979  %pad = arith.constant 0.000000e+00 : f32
1980  %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
1981  return %pack : tensor<32x4x1x16x2xf32>
1982}
1983
1984// CHECK-LABEL:   func.func @test_vectorize_padded_pack(
1985// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x7x15xf32>,
1986// CHECK-SAME:      %[[VAL_1:.*]]: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
1987// CHECK:           %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
1988// CHECK:           %[[VAL_3:.*]] = arith.constant 0 : index
1989// CHECK:           %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
1990// CHECK:           %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
1991// CHECK:           %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [0, 1, 3, 4, 2] : vector<32x4x2x1x16xf32> to vector<32x4x1x16x2xf32>
1992// CHECK:           %[[VAL_7:.*]] = tensor.empty() : tensor<32x4x1x16x2xf32>
1993// CHECK:           %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<32x4x1x16x2xf32>, tensor<32x4x1x16x2xf32>
1994// CHECK:           return %[[VAL_8]] : tensor<32x4x1x16x2xf32>
1995
1996module attributes {transform.with_named_sequence} {
1997  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
1998    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
1999    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
2000    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
2001    transform.yield
2002  }
2003}
2004
2005// -----
2006
2007///----------------------------------------------------------------------------------------
2008/// tensor.insert_slice
2009///----------------------------------------------------------------------------------------
2010
2011// The pad value for xfer-read is neither needed nor available - use the default (0.0).
2012
2013// CHECK-LABEL: func @insert_static_slice_default_pad
2014// CHECK-SAME:      %[[ARG_0:.*]]: tensor<1x2x3xf32>,
2015// CHECK-SAME:      %[[ARG_1:.*]]: tensor<9x8x7x1x2x3xf32>) -> tensor<9x8x7x1x2x3xf32> {
2016// CHECK:           %[[PAD:.*]] = arith.constant 0.000000e+00 : f32
2017// CHECK:           %[[C0:.*]] = arith.constant 0 : index
2018// CHECK:           %[[READ:.*]] = vector.transfer_read %[[ARG_0]]{{\[}}%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, true, true]} : tensor<1x2x3xf32>, vector<1x2x3xf32>
2019// CHECK:           %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[ARG_1]]{{\[}}%[[C0]], %[[C0]], %[[C0]], %[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<1x2x3xf32>, tensor<9x8x7x1x2x3xf32>
2020// CHECK:           return %[[WRITE]] : tensor<9x8x7x1x2x3xf32>
2021func.func @insert_static_slice_default_pad(%arg0: tensor<1x2x3xf32>, %arg1: tensor<9x8x7x1x2x3xf32>) -> tensor<9x8x7x1x2x3xf32> {
2022  %res = tensor.insert_slice %arg0 into %arg1[0, 0, 0, 0, 0, 0] [1, 1, 1, 1, 2, 3][1, 1, 1, 1, 1, 1] : tensor<1x2x3xf32> into tensor<9x8x7x1x2x3xf32>
2023  return %res : tensor<9x8x7x1x2x3xf32>
2024}
2025
2026module attributes {transform.with_named_sequence} {
2027  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
2028    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
2029    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
2030    %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
2031    transform.yield
2032  }
2033}
2034
2035// -----
2036
2037// Same as above, but there's a pad value available that should be used instead of the default value.
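// The fill value %pad is broadcast into the destination and reused as the
// transfer_read padding, as verified by the CHECK lines below.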

// CHECK-LABEL:   func.func @insert_static_slice_non_zero_pad
// CHECK-SAME:      %[[ARG_0:.*]]: tensor<1x2x3xf32>,
// CHECK-SAME:      %[[PAD:.*]]: f32) -> tensor<9x8x7x1x2x3xf32> {
// CHECK:           %[[EMPTY:.*]] = tensor.empty() : tensor<9x8x7x1x2x3xf32>
// CHECK:           %[[BC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<9x8x7x1x2x3xf32>
// CHECK:           %[[WRITE:.*]] = vector.transfer_write %[[BC]], %[[EMPTY]]{{.*}} {in_bounds = [true, true, true, true, true, true]} : vector<9x8x7x1x2x3xf32>, tensor<9x8x7x1x2x3xf32>
// CHECK:           %[[READ:.*]] = vector.transfer_read %[[ARG_0]]{{.*}}, %[[PAD]] {in_bounds = [true, true, true]} : tensor<1x2x3xf32>, vector<1x2x3xf32>
// CHECK:           %[[RES:.*]] = vector.transfer_write %[[READ]], %[[WRITE]]{{.*}} {in_bounds = [true, true, true]} : vector<1x2x3xf32>, tensor<9x8x7x1x2x3xf32>
// CHECK:           return %[[RES]] : tensor<9x8x7x1x2x3xf32>
func.func @insert_static_slice_non_zero_pad(%arg0: tensor<1x2x3xf32>, %pad : f32) -> tensor<9x8x7x1x2x3xf32> {
  %init = tensor.empty() : tensor<9x8x7x1x2x3xf32>
  %fill = linalg.fill ins(%pad : f32) outs(%init : tensor<9x8x7x1x2x3xf32>) -> tensor<9x8x7x1x2x3xf32>
  %res = tensor.insert_slice %arg0 into %fill[0, 0, 0, 0, 0, 0] [1, 1, 1, 1, 2, 3][1, 1, 1, 1, 1, 1] : tensor<1x2x3xf32> into tensor<9x8x7x1x2x3xf32>
  return %res : tensor<9x8x7x1x2x3xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}

// -----

// Same as above, but the source type is dynamically shaped. This means that
// the pad value is now required and that the vector dim corresponding to the
// dynamic dimension has to be inferred from the shape of the destination tensor.
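// Note: the slice covers [1, %size, 3] elements of the source, but the
// vectorizer reads vector<1x2x3xf32>, taking the "2" from the corresponding
// destination dims (...x2x3). The dynamic dim is marked out-of-bounds
// (in_bounds = [true, false, true]), so %pad fills the tail whenever the
// source holds fewer than 2 elements along it.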

// CHECK-LABEL:   func.func @insert_dynamic_slice_non_zero_pad(
// CHECK-SAME:      %[[ARG_0:.*]]: tensor<1x?x3xf32>,
// CHECK-SAME:      %[[PAD:.*]]: f32,
// CHECK-SAME:      %[[SIZE:.*]]: index) -> tensor<9x8x7x1x2x3xf32> {
// CHECK:           %[[EMPTY:.*]] = tensor.empty() : tensor<9x8x7x1x2x3xf32>
// CHECK:           %[[BC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<9x8x7x1x2x3xf32>
// CHECK:           %[[WRITE:.*]] = vector.transfer_write %[[BC]], %[[EMPTY]]{{.*}} {in_bounds = [true, true, true, true, true, true]} : vector<9x8x7x1x2x3xf32>, tensor<9x8x7x1x2x3xf32>
// CHECK:           %[[READ:.*]] = vector.transfer_read %[[ARG_0]]{{.*}}, %[[PAD]] {in_bounds = [true, false, true]} : tensor<1x?x3xf32>, vector<1x2x3xf32>
// CHECK:           %[[RES:.*]] = vector.transfer_write %[[READ]], %[[WRITE]]{{.*}} {in_bounds = [true, true, true]} : vector<1x2x3xf32>, tensor<9x8x7x1x2x3xf32>
// CHECK:           return %[[RES]] : tensor<9x8x7x1x2x3xf32>
func.func @insert_dynamic_slice_non_zero_pad(%arg0: tensor<1x?x3xf32>, %pad : f32, %size: index) -> tensor<9x8x7x1x2x3xf32> {
  %init = tensor.empty() : tensor<9x8x7x1x2x3xf32>
  %fill = linalg.fill ins(%pad : f32) outs(%init : tensor<9x8x7x1x2x3xf32>) -> tensor<9x8x7x1x2x3xf32>
  %res = tensor.insert_slice %arg0 into %fill[0, 0, 0, 0, 0, 0] [1, 1, 1, 1, %size, 3][1, 1, 1, 1, 1, 1] : tensor<1x?x3xf32> into tensor<9x8x7x1x2x3xf32>
  return %res : tensor<9x8x7x1x2x3xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}
