// Source: /llvm-project/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir (revision 39ad84e4d173b43dcd13209dc7c62de7a0476c80)
// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s

// 1D NWC/WCF convolution whose channel dimension is dynamic. No vector sizes
// are supplied to the vectorize op, so (per the expected-error below) the
// vectorizer refuses the op rather than rewriting it.
func.func @conv1d_nwc_wcf_dyn_ch_dim(%input: memref<4x6x?xf32>, %filter: memref<1x?x8xf32>, %output: memref<4x2x8xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.conv_1d_nwc_wcf
    {dilations = dense<1> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
    ins(%input, %filter : memref<4x6x?xf32>, memref<1x?x8xf32>)
    outs(%output : memref<4x2x8xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.conv_1d_nwc_wcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

// Masked vectorisation of 1D depthwise CW convs is not yet supported.
// Explicit vector sizes are given, so this exercises the masked path; the
// expected-error below documents that it still fails for the NCW/CW layout.
func.func @depthwise_conv1d_ncw_cw(%input: memref<3x?x4xf32>, %filter: memref<?x1xf32>, %output: memref<3x?x4xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.depthwise_conv_1d_ncw_cw
    {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
    ins(%input, %filter : memref<3x?x4xf32>, memref<?x1xf32>)
    outs(%output : memref<3x?x4xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_ncw_cw"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [3, 4, 5, 1] : !transform.any_op
    transform.yield
  }
}

// -----

// Masked vectorisation of a 1D depthwise NWC/WC conv with a dynamic W dim.
// The vector sizes below request a filter-dim size of 2; the expected-error
// documents that vectorization fails for this configuration.
func.func @depthwise_conv1d_nwc_wc_dyn_w_dim(%input: memref<3x?x4xf32>, %filter: memref<?x4xf32>, %output: memref<3x?x4xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.depthwise_conv_1d_nwc_wc
    {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
    ins(%input, %filter : memref<3x?x4xf32>, memref<?x4xf32>)
    outs(%output : memref<3x?x4xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [3, 2, 4, 2] : !transform.any_op
    transform.yield
  }
}

// -----

// 1D depthwise NWC/WC conv with a dynamic channel dimension and no vector
// sizes supplied; the expected-error documents that static (unmasked)
// vectorization fails on the dynamic shape.
func.func @depthwise_conv1d_nwc_wc_dyn_ch_dim(%input: memref<3x5x?xf32>, %filter: memref<2x?xf32>, %output: memref<3x2x?xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.depthwise_conv_1d_nwc_wc
    ins(%input, %filter : memref<3x5x?xf32>, memref<2x?xf32>)
    outs(%output : memref<3x2x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

// Same op as above but with a dynamic W (spatial) dimension instead; again no
// vector sizes are supplied, so unmasked vectorization is expected to fail.
func.func @depthwise_conv1d_nwc_wc_dyn_w_dim(%input: memref<3x?x3xf32>, %filter: memref<2x3xf32>, %output: memref<3x?x3xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.depthwise_conv_1d_nwc_wc
    ins(%input, %filter : memref<3x?x3xf32>, memref<2x3xf32>)
    outs(%output : memref<3x?x3xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

// Plain linalg.conv_1d on tensors with a dynamic W dimension; without vector
// sizes the vectorizer emits the failure diagnostic below.
func.func @conv1d_dyn_w_dim(%input: tensor<?xf32>, %filter: tensor<4xf32>, %output: tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  %0 = linalg.conv_1d ins(%input, %filter : tensor<?xf32>, tensor<4xf32>)
                     outs(%output : tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.conv_1d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

// tensor.pack with a dynamically shaped source and no vector sizes: the
// expected-error documents that vectorization is rejected.
func.func @test_pack_no_vectorize_dynamic_shape(%arg0: tensor<?xf32>, %arg1: tensor<4x16xf32>) -> tensor<4x16xf32> {
  %pad = arith.constant 0.000000e+00 : f32
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [0] inner_tiles = [16] into %arg1 : tensor<?xf32> -> tensor<4x16xf32>
  return %pack : tensor<4x16xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}

// -----

// linalg.reduce where the scalable vector size ([4]) is requested for the
// leading (reduction) dimension; the expected-error documents that this
// combination is not vectorizable.
func.func @linalg_reduce_scalable_leading_dim(%input: tensor<?x?xf32>,
                                              %acc: tensor<?xf32>) -> tensor<?xf32> {

  // expected-error @+1 {{Attempted to vectorize, but failed}}
  %0 = linalg.reduce ins(%input : tensor<?x?xf32>) outs(%acc : tensor<?xf32>) dimensions = [0]
  (%in: f32, %init: f32) {
    %0 = arith.addf %in, %init : f32
    linalg.yield %0 : f32
  }
  return %0 : tensor<?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [[4], 1] : !transform.any_op
    transform.yield
  }
}

// -----

// Same scenario as the linalg.reduce test above, expressed as a linalg.generic
// whose leading iterator is the reduction: a scalable size ([4]) on that
// leading dim is expected to fail to vectorize.
func.func @linalg_generic_reduction_scalable_leading_dim(%input: tensor<?x?xf32>,
                                                         %acc: tensor<?xf32>) -> tensor<?xf32> {

  // expected-error @+1 {{Attempted to vectorize, but failed}}
  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                         affine_map<(d0, d1) -> (d1)>],
                        iterator_types = ["reduction", "parallel"] }
    ins(%input : tensor<?x?xf32>)
    outs(%acc : tensor<?xf32>) {
    ^bb(%in: f32, %out: f32) :
      %0 = arith.addf %in, %out : f32
      linalg.yield %0 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [[4], 1] : !transform.any_op
    transform.yield
  }
}

// -----

// linalg.matvec with scalable sizes requested for BOTH dimensions ([4], [4]);
// the expected-error documents that more than one scalable dim is rejected
// here.
func.func @linalg_matvec_scalable_two_dims(%A: memref<?x?xf32>, %B: memref<?xf32>, %C: memref<?xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.matvec ins(%A, %B: memref<?x?xf32>, memref<?xf32>)
                outs(%C: memref<?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %matmul vector_sizes [[4], [4]] : !transform.any_op
    transform.yield
  }
}

// -----

// linalg.matmul with a scalable size ([8]) on the leading parallel dimension;
// the expected-error documents that this placement is not supported.
func.func @linalg_matmul_scalable_leading_parallel_dim(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
                outs(%C: memref<?x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %matmul vector_sizes [[8], 16, 4] : !transform.any_op
    transform.yield
  }
}

// -----

// linalg.matmul with a scalable size ([4]) on the trailing (reduction, K)
// dimension; the expected-error documents that this placement is not
// supported either.
func.func @linalg_matmul_scalable_trailing_reduction_dim(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
  // expected-error @+1 {{Attempted to vectorize, but failed}}
  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
                outs(%C: memref<?x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %matmul vector_sizes [8, 16, [4]] : !transform.any_op
    transform.yield
  }
}

// -----

// Matmul written as a linalg.generic, with scalable sizes on the two trailing
// dims (N and K: [2, [4], [4]]); the expected-error documents that two
// scalable dims in these positions are rejected.
func.func @linalg_generic_matmul_scalable_two_trailing_dims(%A: tensor<?x64xf32>, %B: tensor<64x?xf32>,
                                                            %C: tensor<?x?xf32>) -> tensor<?x?xf32> {

  // expected-error @+1 {{Attempted to vectorize, but failed}}
  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                                         affine_map<(d0, d1, d2) -> (d2, d1)>,
                                         affine_map<(d0, d1, d2) -> (d0, d1)>],
                        iterator_types = ["parallel", "parallel", "reduction"] }
    ins(%A, %B : tensor<?x64xf32>, tensor<64x?xf32>)
    outs(%C: tensor<?x?xf32>) {
    ^bb(%in1: f32, %in2: f32, %out: f32) :
      %0 = arith.mulf %in1, %in2 : f32
      %1 = arith.addf %0, %out : f32
      linalg.yield %1 : f32
    } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [2, [4], [4]] : !transform.any_op
    transform.yield
  }
}

// -----

// With dynamically shaped source, the vectorizer infers the vector size for
// xfer Ops from the destination tensor and, conservatively, assumes
// out-of-bounds accesses. Out-of-bounds accesses require a pad value, but
// that's impossible to recover in this example. Hence no vectorization.

// TODO: Use diagnostics once we can vectorize tensor.insert_slice with
// transform.structured.vectorize

// CHECK-LABEL: @insert_dynamic_slice_unknown_pad
// CHECK-NOT: vector
// CHECK: tensor.insert_slice
func.func @insert_dynamic_slice_unknown_pad(%arg0: tensor<1x?x3xf32>, %arg1: tensor<9x8x7x1x2x3xf32>, %size: index) -> tensor<9x8x7x1x2x3xf32> {
  %res = tensor.insert_slice %arg0 into %arg1[0, 0, 0, 0, 0, 0] [1, 1, 1, 1, %size, 3][1, 1, 1, 1, 1, 1] : tensor<1x?x3xf32> into tensor<9x8x7x1x2x3xf32>
  return %res : tensor<9x8x7x1x2x3xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.insert_slice"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}
283