// Source: mlir/test/Dialect/Linalg/invalid.mlir (revision 0d4efa27252cbbea4b5672d4d8ffc15a3ba51d83)
// RUN: mlir-opt %s -split-input-file -verify-diagnostics

// Invalid-IR tests: each case below must trigger exactly the diagnostic named
// in its expected-error annotation. `@+N` counts lines from the annotation to
// the op that emits the diagnostic.

// memref.load with more indices than the memref's rank (rank-0 here).
func.func @load_number_of_indices(%v : memref<f32>) {
  // expected-error @+2 {{incorrect number of indices for load}}
  %c0 = arith.constant 0 : index
  memref.load %v[%c0] : memref<f32>
}

// -----

// memref.store with more indices than the memref's rank (rank-0 here).
func.func @store_number_of_indices(%v : memref<f32>) {
  // expected-error @+3 {{store index operand count not equal to memref rank}}
  %c0 = arith.constant 0 : index
  %f0 = arith.constant 0.0 : f32
  memref.store %f0, %v[%c0] : memref<f32>
}

// -----

// linalg.yield is only legal inside an op implementing the LinalgOp interface.
func.func @yield_parent(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  // expected-error @+1 {{op expected parent op with LinalgOp interface}}
  linalg.yield %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}

// -----

// linalg.index is only legal inside an op implementing the LinalgOp interface.
func.func @index_parent() {
  // expected-error @+1 {{op expected parent op with LinalgOp interface}}
  linalg.index 0 : index
}

// -----

// linalg.index dim must be < number of loops of the enclosing op (0 loops here).
func.func @index_dim_lower_than_number_of_loops(%arg0: memref<f32>) {
  // expected-error @+6 {{op expected dim (2) to be lower than the number of loops (0) of the enclosing LinalgOp}}
  linalg.generic {
      indexing_maps =  [ affine_map<() -> ()> ],
      iterator_types = []}
      outs(%arg0 : memref<f32>) {
    ^bb(%0: f32):
      linalg.index 2 : index
      linalg.yield %0 : f32
  }
}

// -----

// linalg.index dim attribute must be non-negative.
func.func @index_dim_negative(%arg0: memref<f32>) {
  // expected-error @+6 {{op attribute 'dim' failed to satisfy constraint: 64-bit signless integer attribute whose minimum value is 0}}
  linalg.generic {
      indexing_maps =  [ affine_map<() -> ()> ],
      iterator_types = []}
      outs(%arg0 : memref<f32>) {
    ^bb(%0: f32):
      linalg.index -1 : index
      linalg.yield %0 : f32
  }
}
61
// linalg.generic structural checks: missing region, yield arity, indexing-map
// dims vs. loop count, iterator-type spelling, and operand-rank vs. map-rank.

// A linalg.generic must carry a region.
func.func @generic_no_region(%arg0: memref<f32>) {
  // expected-error @+4 {{expected '{' to begin a region}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> (0)> ],
    iterator_types = []
  } ins(%arg0 : memref<f32>)
}

// -----

// Yield count must equal the number of inits/outs (here 0 vs 1).
func.func @generic_mismatched_num_returns(%arg0: memref<f32>) {
  // expected-error @+6 {{op expected number of yield values (0) to match the number of inits / outs operands of the enclosing LinalgOp (1)}}
  linalg.generic {
      indexing_maps =  [ affine_map<() -> ()> ],
      iterator_types = []}
      outs(%arg0 : memref<f32>) {
    ^bb(%0: f32):
      linalg.yield
  }
}

// -----

// Each indexing map's dim count must match the number of loops.
func.func @generic_wrong_dim_in_map(%arg0: memref<1xi32>) {
  // expected-error @+1 {{op expected indexing_map #0 to have 1 dim(s) to match the number of loops}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> (0)> ],
    iterator_types = ["parallel"]}
      outs(%arg0 : memref<1xi32>) {
    ^bb(%i : i32):
    linalg.yield %i : i32
  }
}

// -----

// iterator_types entries must be one of the known kinds.
func.func @generic_wrong_iterator(%arg0: memref<1xi32>) {
  // expected-error @+4 {{unexpected iterator_type (random)}}
  linalg.generic {
    indexing_maps =  [ affine_map<(i) -> (i)> ],
    iterator_types = ["random"]}
      outs(%arg0 : memref<1xi32>) {
    ^bb(%i : i32):
    linalg.yield %i : i32
  }
}

// -----

// Operand rank must equal its indexing map's result rank (1 vs 2).
func.func @generic_one_d_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  // expected-error @+1 {{expected operand rank (1) to match the result rank of indexing_map #0 (2)}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> (0, 0)> ],
    iterator_types = []}
      outs(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
    ^bb(%f : f32):
      linalg.yield %f: f32
  }
}

// -----

// A scalar input (rank 0) cannot use a rank-1 indexing map.
func.func @generic_scalar_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  %cst = arith.constant 0.0 : f32
  // expected-error @+1 {{expected operand rank (0) to match the result rank of indexing_map #0 (1)}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> (0)>, affine_map<() -> (0, 0)> ],
    iterator_types = []}
      ins(%cst : f32)
      outs(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
    ^bb(%0 : f32, %1 : f32):
      linalg.yield %0: f32
  }
}

// -----

// Yielded value type must match the output element type (i4 vs f32).
func.func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  // expected-error @+7 {{'linalg.yield' op type of yield operand 1 ('i4') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
  linalg.generic {
    indexing_maps =  [ affine_map<(i) -> (i)> ],
    iterator_types = ["parallel"]}
      outs(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
    ^bb(%0: f32):
      %1 = arith.constant 1: i4
      linalg.yield %1: i4
  }
}

// -----

// Non-invertible indexing maps (i + j in every map) make the shape-to-loops
// map null.
func.func @generic_singular_maps(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>, %arg1: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  // expected-error @+1 {{expected the shape-to-loops map to be non-null}}
  linalg.generic {
    indexing_maps =  [
      affine_map<(i, j) -> (i + j)>,
      affine_map<(i, j) -> (i + j)>
    ],
    iterator_types = ["parallel","parallel"]}
    ins(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>)
   outs(%arg1 : memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  ^bb(%0: f32, %1: f32):
      linalg.yield %1: f32
  }
}

////////////////////////////////////////////////////////////////////////////////
///////////////////////////// Region tests /////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
171
// -----

// Region/body checks for linalg.generic: block counts, block-argument and
// yield element types, tensor-result typing, and terminator presence.

// A LinalgOp region may have at most one block.
func.func @generic_empty_region(%arg0: memref<f32>) {
  %f0 = arith.constant 0.0: f32
  // expected-error @+1 {{op expects region #0 to have 0 or 1 blocks}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> ()>, affine_map<() -> ()> ],
    iterator_types = []}
      ins(%arg0 : memref<f32>)
     outs(%arg0 : memref<f32>) {
    ^bb1:
      linalg.yield %f0: f32
    ^bb2:
      linalg.yield %f0: f32
  }
}

// -----

// A LinalgOp region must contain exactly one block (empty region rejected).
func.func @generic_empty_region(%arg0: memref<f32>) {
  %f0 = arith.constant 0.0: f32
  // expected-error @+1 {{op expects to have 1 region with 1 block}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> ()> , affine_map<() -> ()> ],
    iterator_types = []}
    ins(%arg0 : memref<f32>)
   outs(%arg0 : memref<f32>) {
  }
}

// -----

// Yield count must match outs count (1 vs 2).
func.func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
  // expected-error @+6 {{'linalg.yield' op expected number of yield values (1) to match the number of inits / outs operands of the enclosing LinalgOp (2)}}
  linalg.generic {
      indexing_maps =  [ affine_map<() -> ()>, affine_map<() -> ()> ],
      iterator_types = []}
      outs(%arg0, %arg0 : memref<f32>, memref<f32>) {
    ^bb(%f: f32):
      linalg.yield %f: f32
  }
}

// -----

// Block argument type must match the shaped operand's element type.
func.func @generic_shaped_operand_block_arg_type(%arg0: memref<f32>) {
  // expected-error @+6 {{'linalg.yield' op type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> ()> ],
    iterator_types = []}
      outs(%arg0 : memref<f32>) {
    ^bb(%i: i1):
    linalg.yield %i : i1
  }
}

// -----

// Same check with a tensor operand.
func.func @generic_scalar_operand_block_arg_type(%arg0: tensor<f32>) {
  // expected-error @+6 {{'linalg.yield' op type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
  linalg.generic {
    indexing_maps =  [ affine_map<() -> ()> ],
    iterator_types = []}
      outs(%arg0 : tensor<f32>) {
    ^bb(%i: i1):
    linalg.yield %i : i1
  } -> tensor<f32>
}

// -----

// Yielded type must match output element type (i1 vs f32).
func.func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
  // expected-error @+7 {{type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
  linalg.generic {
    indexing_maps = [ affine_map<(i) -> (i)> ],
    iterator_types = ["parallel"]}
      outs(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
    ^bb(%i: f32):
      %0 = arith.constant 0: i1
      linalg.yield %0: i1
  }
}

// -----

// Tensor result type must match the corresponding outs operand type.
func.func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>,
                                 %arg1: tensor<?xf32>) {
  // expected-error @+1 {{expected type of operand #1 ('tensor<?xf32>') to match type of corresponding result ('tensor<f32>')}}
  %0 = linalg.generic {
    indexing_maps = [ affine_map<(i) -> (i)> , affine_map<(i) -> (i)> ],
    iterator_types = ["parallel"]}
       ins(%arg0 : memref<?xf32, affine_map<(i)[off]->(off + i)>>)
      outs(%arg1 : tensor<?xf32>) {
    ^bb(%i: f32, %j: f32):
      linalg.yield %i: f32
  } -> tensor<f32>
}

// -----

// The body block must end with a terminator (linalg.yield).
func.func @generic(%arg0: memref<?x?xf32>) {
  // expected-error @+6 {{block with no terminator, has %0 = "arith.addf"(%arg1, %arg1) <{fastmath = #arith.fastmath<none>}> : (f32, f32) -> f32}}
  linalg.generic  {
    indexing_maps = [ affine_map<(i, j) -> (i, j)> ],
    iterator_types = ["parallel", "parallel"]}
      outs(%arg0 : memref<?x?xf32>) {
    ^bb(%0: f32) :
      %1 = arith.addf %0, %0: f32
  }
  return
}

// -----

// This test is currently disabled: subject to verifier ordering issues.
// Instead, when the ranks are not greater than 2, an assertion will be triggered
// in LinalgStructuredOps.td::ConvOp::iterator_types() for now because the
// verifier inspects the iterator_types. This is slated to become an
// autogenerated op in the future, alleviating the issue.
// func @conv_rank_limit(%arg0: memref<?xf32>, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
//   // DISABLED_expected -error @+1 {{expects memref ranks to be greater than 2}}
//   linalg.conv(%arg0, %arg1, %arg2) : memref<?xf32>, memref<?xf32>, memref<?xf32>
// }
//
// // -----
297
// Named-op checks: batch_matmul/matmul operand ranks and result typing, and
// linalg.fill tensor/memref result rules.

// batch_matmul rhs must be rank 3.
func.func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?xf32>) {
  // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
  linalg.batch_matmul ins(%a3, %b3: memref<?x?x?xf32>, memref<?x?xf32>)
                     outs(%c3 : memref<?x?x?xf32>)
  return
}

// -----

// matmul without outs: the auto-built region gets too few block args.
func.func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
  // expected-error @+3 {{region expects 3 args, got 2}}
  %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
                       -> (tensor<?x?xf32>, tensor<?x?xf32>)
  return
}

// -----

// Tensor result type must match the outs operand type.
func.func @matching_inits(%m: memref<?x?xf32>, %t: tensor<?x?xf32>) {
  // expected-error @+1 {{expected type of operand #2 ('tensor<?x?xf32>') to match type of corresponding result ('tensor<?xf32>')}}
  %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
                      outs(%t : tensor<?x?xf32>)
                        -> tensor<?xf32>
  return
}

// -----

// fill on a tensor out must return the tensor result.
func.func @illegal_fill_tensor_no_return(%arg0 : index, %arg1 : index, %arg2 : f32)
{
  %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf32>
  // expected-error @+1 {{expected the number of tensor results (0) to be equal to the number of output tensors (1)}}
  linalg.fill ins(%arg2 : f32) outs(%0 : tensor<?x?xf32>)
}

// -----

// fill on a memref out must not return a tensor.
func.func @illegal_fill_memref_with_tensor_return
  (%arg0 : memref<?x?xf32>, %arg1 : f32) -> tensor<?x?xf32>
{
  // expected-error @+1 {{expected the number of tensor results (1) to be equal to the number of output tensors (0)}}
  %0 = linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

// fill results must be ranked tensors, never memrefs.
func.func @illegal_fill_tensor_with_memref_return
  (%arg0 : tensor<?x?xf32>, %arg1 : f32) -> memref<?x?xf32>
{
  // expected-error @+1 {{result #0 must be variadic of ranked tensor of any type values, but got 'memref<?x?xf32>'}}
  %0 = linalg.fill ins(%arg1 : f32) outs(%arg0 : tensor<?x?xf32>) -> memref<?x?xf32>
  return %0 : memref<?x?xf32>
}

// -----

// fill's ins operand must be a scalar.
func.func @illegal_fill_value_type(%arg0 : tensor<2x2xf32>, %arg1 : tensor<2xf32>) -> tensor<2x2xf32>
{
  // expected-error @+1 {{expected op with scalar input}}
  %0 = linalg.fill ins(%arg1 : tensor<2xf32>) outs(%arg0 : tensor<2x2xf32>) -> tensor<2x2xf32>
  return %0 : tensor<2x2xf32>
}

// -----

// Static shape inference: K dims of A and B must agree (4 vs 3).
func.func @invalid_static_matmul(%arg0: memref<2x4xf32>, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
  // expected-error @+1 {{inferred input/output operand #1 has shape's dimension #0 to be 4, but found 3}}
  linalg.matmul ins(%arg0, %arg1 : memref<2x4xf32>, memref<3x4xf32>)
                      outs(%arg2 :memref<2x4xf32>)
  return
}

// -----

// Scalar lhs is rejected against the default rank-2 map.
func.func @invalid_scalar_input_matmul(%arg0: f32, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
  // expected-error @+1 {{'linalg.matmul' op expected operand rank (0) to match the result rank of indexing_map #0 (2)}}
  linalg.matmul ins(%arg0, %arg1 : f32, memref<3x4xf32>)
                outs(%arg2 : memref<2x4xf32>)
  return
}

// -----

// Scalar outs operand is rejected: outputs must be shaped.
func.func @invalid_scalar_output_matmul(%arg0: memref<2x3xf32>, %arg1: memref<3x4xf32>, %arg2: f32) {
  // expected-error @+1 {{'linalg.matmul' op operand #2 must be variadic of shaped of any type values, but got 'f32'}}
  linalg.matmul ins(%arg0, %arg1 : memref<2x3xf32>, memref<3x4xf32>)
                outs(%arg2 : f32)
  return
}

// -----

// Malformed indexing_maps list (leading comma) fails attribute parsing.
func.func @invalid_indexing_maps_matmul(%arg0: memref<2x4xf32>, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
  // expected-error @+1 {{expected attribute value}}
  linalg.matmul indexing_maps = [
                       ,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                      ]
                      ins(%arg0, %arg1 : memref<2x4xf32>, memref<3x4xf32>)
                      outs(%arg2 :memref<2x4xf32>)
  return
}
402
// -----

// Custom matmul indexing_maps: only transposes/broadcasts of the canonical
// (d0, d2) x (d2, d1) -> (d0, d1) maps are accepted.

// A's map may not use the N dim (d1).
func.func @invalid_matmul_dim_a(%arg0: memref<5x5xf32>, %arg1: memref<5x5xf32>, %arg2: memref<5x5xf32>) {
  // expected-error @+1 {{Unexpected dim expression in map result}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<5x5xf32>, memref<5x5xf32>) outs(%arg2: memref<5x5xf32>)
  return
}

// -----

// B's map may not use the M dim (d0).
func.func @invalid_matmul_dim_b(%arg0: memref<5x5xf32>, %arg1: memref<5x5xf32>, %arg2: memref<5x5xf32>) {
  // expected-error @+1 {{Unexpected dim expression in map result}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d0)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<5x5xf32>, memref<5x5xf32>) outs(%arg2: memref<5x5xf32>)
  return
}

// -----

// Transposed-A maps with a non-transposed lhs: inferred K mismatches.
func.func @invalid_transpose_a_matmul(%lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) -> tensor<4x64xf32> {
  // expected-error @+1 {{inferred input/output operand #1 has shape's dimension #0 to be 4, but found 1}}
  %0 = linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d2, d0)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                      ]
                      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
                      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return %0: tensor<4x64xf32>
}

// -----

// Transposed-B maps with a non-transposed rhs: inferred K mismatches.
func.func @invalid_transpose_b_matmul(%lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) -> tensor<4x64xf32> {
  // expected-error @+1 {{inferred input/output operand #1 has shape's dimension #1 to be 1, but found 64}}
  %0 = linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                      ]
                      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
                      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return %0: tensor<4x64xf32>
}

// -----

// Rank-1 A may only broadcast along K, i.e. map (d2).
func.func @invalid_bcast_a(%arg0: memref<3xf32>, %arg1: memref<5x7xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{'linalg.matmul' op Invalid broadcast requested, should be (d2)}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0)>,
                       affine_map<(d0, d1, d2) -> (d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<3xf32>, memref<5x7xf32>) outs(%arg2: memref<3x7xf32>)
  return
}

// -----

// Rank-1 B may only broadcast along K, i.e. map (d2).
func.func @invalid_bcast_b(%arg0: memref<3x5xf32>, %arg1: memref<7xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{'linalg.matmul' op Invalid broadcast requested, should be (d2)}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<3x5xf32>, memref<7xf32>) outs(%arg2: memref<3x7xf32>)
  return
}

// -----

// Broadcast map on A requires a rank-1 operand.
func.func @invalid_bcast_a_rank_mismatch(%arg0: memref<3x5xf32>, %arg1: memref<5x7xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{'linalg.matmul' op expected operand rank (2) to match the result rank of indexing_map #0 (1)}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<3x5xf32>, memref<5x7xf32>) outs(%arg2: memref<3x7xf32>)
  return
}

// -----

// Broadcast map on B requires a rank-1 operand.
func.func @invalid_bcast_b_rank_mismatch(%arg0: memref<3x5xf32>, %arg1: memref<5x7xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{'linalg.matmul' op expected operand rank (2) to match the result rank of indexing_map #1 (1)}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d2)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<3x5xf32>, memref<5x7xf32>) outs(%arg2: memref<3x7xf32>)
  return
}

// -----

// Transposed A + broadcast B: K inferred from A (5) mismatches B (7).
func.func @invalid_matmul_bcast_b_transpose_a(%arg0: memref<5x3xf32>, %arg1: memref<7xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{inferred input/output operand #1 has shape's dimension #0 to be 5, but found 7}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d2, d0)>,
                       affine_map<(d0, d1, d2) -> (d2)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<5x3xf32>, memref<7xf32>) outs(%arg2: memref<3x7xf32>)
  return
}

// -----

// A's map uses the N dim (d1), which matmul never allows.
func.func @invalid_matmul_bcast_b_transpose_a_wrong_dim(%arg0: memref<3x5xf32>, %arg1: memref<5xf32>, %arg2: memref<3x7xf32>) {
  // expected-error @+1 {{'linalg.matmul' op Unexpected dim expression in map result.}}
  linalg.matmul indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d2)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                     ]
                     ins(%arg0, %arg1 : memref<3x5xf32>, memref<5xf32>) outs(%arg2: memref<3x7xf32>)
  return
}
534
// -----

// Attribute placement and linalg.contract verifier tests.

// indexing_maps must precede ins/outs in matmul's custom syntax.
func.func @invalid_indexing_maps_placement_matmul(%lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+2 {{custom op 'indexing_maps' is unknown (tried 'func.indexing_maps' as well)}}
  linalg.matmul ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>) outs(%init : tensor<4x64xf32>)
                        indexing_maps = [
                       affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>
                      ]
  return
}

// -----

func.func @invalid_indexing_maps_placement_contraction(
    %lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+3 {{custom op 'linalg.contract' expected 'indexing_maps' attribute}}
  // NB: indexing_maps should be provided before ins and outs
  linalg.contract
      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>)
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>]
  return
}

// -----

// contract maps must be projected permutations (d0 + d2 is not).
func.func @invalid_affine_map_in_indexing_maps_contraction(
    %lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{provided affine_map is not a projected permutation}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0 + d2, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>]
      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}

// -----

// All contract maps must share one iteration space (3 dims vs 4).
func.func @differing_iteration_space_of_affine_maps_contraction(
    %lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{iteration spaces of provided affine_maps differ}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2, d3) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>]
      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}

// -----

// Operand rank (3) must equal its map's result rank (2).
func.func @mismatched_ranks_affine_map_and_operand_contraction(
    %lhs: tensor<4x1x2xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{ranks of shaped operand and results of corresponding affine_map differ}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                       affine_map<(d0, d1, d2) -> (d2, d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d1)>]
      ins(%lhs, %rhs : tensor<4x1x2xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}
// -----

// A shaped-access map cannot be paired with a scalar operand.
func.func @mismatch_type_affine_map_and_operand_contraction(
    %lhs: f32, %rhs: tensor<4x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{affine_map specifies shaped access while operand has non-shaped type}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1) -> (d0)>,
                       affine_map<(d0, d1) -> (d0, d1)>,
                       affine_map<(d0, d1) -> (d0, d1)>]
      ins(%lhs, %rhs : f32, tensor<4x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}

// -----

// Every iteration-space dim must be used by at least one map (d3 is not).
func.func @unused_iteration_space_dim_contraction(
    %lhs: tensor<4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{iteration space dim at index 3 not used to access any operand}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2)>,
                       affine_map<(d0, d1, d2, d3) -> (d2, d1)>,
                       affine_map<(d0, d1, d2, d3) -> (d0, d1)>]
      ins(%lhs, %rhs : tensor<4x1xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}

// -----

// d3 appears only on one input: neither contracting nor parallel.
func.func @unused_iteration_space_dim_contraction(
    %lhs: tensor<8x4x1xf32>, %rhs: tensor<1x64xf32>, %init: tensor<4x64xf32>) {
  // expected-error @+1 {{iteration space dim at index 3 is neither a contracting dim nor of parallel iteration type}}
  linalg.contract
      indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>,
                       affine_map<(d0, d1, d2, d3) -> (d2, d1)>,
                       affine_map<(d0, d1, d2, d3) -> (d0, d1)>]
      ins(%lhs, %rhs : tensor<8x4x1xf32>, tensor<1x64xf32>)
      outs(%init : tensor<4x64xf32>) -> tensor<4x64xf32>
  return
}
645
// -----

// Conv shape inference, negative map results, and linalg.map verifier tests.

// Input spatial dim too small for filter + output extents.
func.func @invalid_static_2d_conv(%input : memref<1x3x4x2xf32>, %filter: memref<3x2x2x1xf32>, %output: memref<1x2x3x1xf32>) {
  // expected-error @+1 {{inferred input/output operand #0 has shape's dimension #1 to be greater than or equal to 4, but found 3}}
  linalg.conv_2d_nhwc_hwcf
    { dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
    ins(%input, %filter : memref<1x3x4x2xf32>, memref<3x2x2x1xf32>)
    outs(%output : memref<1x2x3x1xf32>)
  return
}

// -----

// Map (3 - i) can produce indices below 0 over the iteration domain.
#attrs = {
        indexing_maps = [
                affine_map<(i) -> (3 - i)>,
                affine_map<(i) -> (i)>
        ],
        iterator_types = ["parallel"]
}

func.func @invalid_reverse(%A: memref<5xf32>, %B: memref<5xf32>) {
  // expected-error @+1 {{unexpected result less than 0 at expression #0 in}}
  linalg.generic #attrs ins(%A: memref<5xf32>) outs(%B: memref<5xf32>) {
                ^bb0(%a: f32, %b: f32):
                linalg.yield %a : f32
        }
        return
}

// -----

// linalg.map yields one value per init; two is rejected.
func.func @map_binary_wrong_yield_operands(
    %lhs: tensor<64xf32>, %rhs: tensor<64xf32>, %init: tensor<64xf32>)
    -> tensor<64xf32> {
   %add = linalg.map
          ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
          outs(%init:tensor<64xf32>)
          (%lhs_elem: f32, %rhs_elem: f32) {
            %0 = arith.addf %lhs_elem, %rhs_elem: f32
            // expected-error @+1{{'linalg.yield' op expected number of yield values (2) to match the number of inits / outs operands of the enclosing LinalgOp (1)}}
            linalg.yield %0, %0: f32, f32
          }
  func.return %add : tensor<64xf32>
}

// -----

// Mapper arity must equal the number of ins operands (2 vs 3).
func.func @map_input_mapper_arity_mismatch(
    %lhs: tensor<64xf32>, %rhs: tensor<64xf32>, %init: tensor<64xf32>)
    -> tensor<64xf32> {
  // expected-error@+1{{'linalg.map' op expects number of operands to match the arity of mapper, but got: 2 and 3}}
  %add = linalg.map
      ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
      outs(%init:tensor<64xf32>)
      (%lhs_elem: f32, %rhs_elem: f32, %extra_elem: f32) {
        %0 = arith.addf %lhs_elem, %rhs_elem: f32
        linalg.yield %0: f32
      }
  func.return %add : tensor<64xf32>
}

// -----

// Mapper block-arg element types must match the inputs (f64 vs f32).
func.func @map_input_mapper_type_mismatch(
    %lhs: tensor<64xf32>, %rhs: tensor<64xf32>, %init: tensor<64xf32>)
    -> tensor<64xf32> {
    // expected-error@+1{{'linalg.map' op expected element type of input 'f32' to match bbArg type 'f64'}}
  %add = linalg.map
      ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
      outs(%init:tensor<64xf32>)
      (%lhs_elem: f64, %rhs_elem: f64) {
        %0 = arith.addf %lhs_elem, %rhs_elem: f64
        linalg.yield %0: f64
      }
  func.return %add : tensor<64xf32>
}

// -----

// Input and output shapes of linalg.map must match exactly.
func.func @map_input_output_shape_mismatch(
    %lhs: tensor<64x64xf32>, %rhs: tensor<64x64xf32>, %init: tensor<32xf32>)
    -> tensor<32xf32> {
    // expected-error@+1{{'linalg.map' op expected shape of input (64, 64) to match shape of output (32)}}
  %add = linalg.map
      ins(%lhs, %rhs : tensor<64x64xf32>, tensor<64x64xf32>)
      outs(%init:tensor<32xf32>)
      (%lhs_elem: f32, %rhs_elem: f32) {
        %0 = arith.addf %lhs_elem, %rhs_elem: f32
        linalg.yield %0: f32
      }
  func.return %add : tensor<32xf32>
}

// -----

// linalg.map needs at least the output operand.
func.func @map_no_operands1() {
  // expected-error @+1 {{'linalg.map' op expected 1 or more operands, but found 0}}
  linalg.map { arith.addf }
}

// -----

// Same check through the generic-form builder.
func.func @map_no_operands2() {
  // expected-error @+1 {{'linalg.map' op expected 1 or more operands, but found 0}}
  "linalg.map"() ({
    ^bb0:
  }) : () -> ()
}

// -----

// Binding a name to a zero-result map fails at parse time.
func.func @map_no_operands3(
    %lhs: tensor<64xf32>, %rhs: tensor<64xf32>, %init: tensor<64xf32>)
    -> tensor<64xf32> {
  // expected-error @+1 {{cannot name an operation with no results}}
  %add = linalg.map { arith.addf }
  func.return %add : tensor<64xf32>
}
765
766// -----
767
768func.func @reduce_input_vs_init_dimension_mismatch(
769    %input: tensor<16x32x64xf32>,
770    %init: tensor<16x64xf32>)  -> tensor<16x64xf32> {
771  // expected-error @+1 {{'linalg.reduce' op init dimensions [16, 64] doesn't match input dimensions after reduction [16, 32]}}
772  %reduce = linalg.reduce
773      ins(%input:tensor<16x32x64xf32>)
774      outs(%init:tensor<16x64xf32>)
775      dimensions = [2]
776      (%in: f32, %out: f32) {
777        %0 = arith.addf %in, %out: f32
778        linalg.yield %0: f32
779      }
780  func.return %reduce : tensor<16x64xf32>
781}
782
783// -----
784
// Check that linalg.reduce rejects a reduction dimension >= input rank
// (rank is 3, so valid dimensions are [0, 2]).
func.func @reduce_dimensions_out_of_range(%input: tensor<16x32x64xf32>,
    %init: tensor<16x64xf32>)  -> tensor<16x64xf32> {
  // expected-error @+1 {{'linalg.reduce' op dimensions for reduction should be in the range [0, 2].}}
  %reduce = linalg.reduce
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<16x64xf32>)
      dimensions = [3]
      (%in: f32, %out: f32) {
        %0 = arith.addf %in, %out: f32
        linalg.yield %0: f32
      }
  func.return %reduce : tensor<16x64xf32>
}

// -----

// Check that the 'dimensions' attribute rejects repeated entries; the
// constraint requires strictly increasing values.
func.func @reduce_duplicate_dimensions(%input: tensor<16x32x64xf32>,
    %init: tensor<16xf32>)  -> tensor<16xf32> {
  // expected-error @+1 {{'linalg.reduce' op attribute 'dimensions' failed to satisfy constraint: i64 dense array attribute should be in increasing order}}
  %reduce = linalg.reduce
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<16xf32>)
      dimensions = [1, 1]
      (%in: f32, %out: f32) {
        %0 = arith.addf %in, %out: f32
        linalg.yield %0: f32
      }
  func.return %reduce : tensor<16xf32>
}

// -----

// Check that the 'dimensions' attribute rejects decreasing entries.
func.func @reduce_non_increasing_dimensions(%input: tensor<16x32x64xf32>,
    %init: tensor<16xf32>)  -> tensor<16xf32> {
  // expected-error @+1 {{'linalg.reduce' op attribute 'dimensions' failed to satisfy constraint: i64 dense array attribute should be in increasing order}}
  %reduce = linalg.reduce
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<16xf32>)
      dimensions = [2, 1]
      (%in: f32, %out: f32) {
        %0 = arith.addf %in, %out: f32
        linalg.yield %0: f32
      }
  func.return %reduce : tensor<16xf32>
}

// -----

// Check that the init rank must equal the input rank minus the number of
// reduced dimensions (3 - 2 = 1 here, but init has rank 2).
func.func @reduce_reduced_input_init_rank_mismatch(%input: tensor<16x32x64xf32>,
    %init: tensor<16x64xf32>)  -> tensor<16x64xf32> {
  // expected-error @+1 {{'linalg.reduce' op number of dimensions after reduction 1 doesn't match the init rank 2}}
  %reduce = linalg.reduce
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<16x64xf32>)
      dimensions = [1, 2]
      (%in: f32, %out: f32) {
        %0 = arith.addf %in, %out: f32
        linalg.yield %0: f32
      }
  func.return %reduce : tensor<16x64xf32>
}
846
847// -----
848
// Check that the combiner region must have one block argument per operand
// (2 ins + 2 outs here, but only 2 block arguments are provided).
func.func @reduce_wrong_number_of_block_arguments(
    %input1: tensor<16x32x64xf32>,
    %init1: tensor<16x64xf32>, %input2: tensor<16x32x64xf32>,
    %init2: tensor<16x64xf32>)  -> (tensor<16x64xf32>, tensor<16x64xf32>) {
  // expected-error @+1{{'linalg.reduce' op mismatching number of operands and block arguments}}
  %reduce, %reduce2 = linalg.reduce
      ins(%input1, %input2 : tensor<16x32x64xf32>, tensor<16x32x64xf32>)
      outs(%init1, %init2 : tensor<16x64xf32>, tensor<16x64xf32>)
      dimensions = [1]
      (%in: f32, %out: f32) {
        %0 = arith.addf %in, %out: f32
        linalg.yield %0: f32
      }
  func.return %reduce, %reduce2 : tensor<16x64xf32>, tensor<16x64xf32>
}

// -----

// Check that each input element type must match the corresponding input
// block-argument type (%in2 is declared f64 while %input2 holds f32).
func.func @reduce_wrong_block_argument_input_type(
    %input1: tensor<16x32x64xf32>,
    %init1: tensor<16x64xf32>, %input2: tensor<16x32x64xf32>,
    %init2: tensor<16x64xf32>)  -> (tensor<16x64xf32>, tensor<16x64xf32>) {
  // expected-error @+1{{'linalg.reduce' op input element type 'f32' does not match corresponding block argument type 'f64'}}
  %reduce, %reduce2 = linalg.reduce
      ins(%input1, %input2 : tensor<16x32x64xf32>, tensor<16x32x64xf32>)
      outs(%init1, %init2 : tensor<16x64xf32>, tensor<16x64xf32>)
      dimensions = [1]
      (%in1: f32, %in2: f64, %out1: f32, %out2: f64) {
        %0 = arith.addf %in1, %out1: f32
        %1 = arith.addf %in2, %out2: f64
        linalg.yield %0, %1: f32, f64
      }
  func.return %reduce, %reduce2 : tensor<16x64xf32>, tensor<16x64xf32>
}
883
884// -----
885
// Check that each init element type must match the corresponding output
// block-argument type (%out2 is declared f32 while %init2 holds f64).
// Note: the declared function result types previously disagreed with both
// %init2 and the func.return (f32 vs f64); they are never verified because
// the linalg.reduce diagnostic aborts verification first, but they are now
// made consistent (second result is tensor<16x64xf64>).
func.func @reduce_wrong_block_argument_output_type(
    %input1: tensor<16x32x64xf32>,
    %init1: tensor<16x64xf32>, %input2: tensor<16x32x64xf32>,
    %init2: tensor<16x64xf64>)  -> (tensor<16x64xf32>, tensor<16x64xf64>) {
  // expected-error @+1{{'linalg.reduce' op output element type 'f64' does not match corresponding block argument type 'f32'}}
  %reduce, %reduce2 = linalg.reduce
      ins(%input1, %input2 : tensor<16x32x64xf32>, tensor<16x32x64xf32>)
      outs(%init1, %init2 : tensor<16x64xf32>, tensor<16x64xf64>)
      dimensions = [1]
      (%in1: f32, %in2: f32, %out1: f32, %out2: f32) {
        %0 = arith.addf %in1, %out1: f32
        linalg.yield %0, %out2: f32, f32
      }
  func.return %reduce, %reduce2 : tensor<16x64xf32>, tensor<16x64xf64>
}
901
902// -----
903
// Check that all reduce inputs must share one shape (input index 1 is
// 17x32x64 while input index 0 is 16x32x64).
func.func @reduce_different_input_shapes(%input1: tensor<16x32x64xf32>,
    %init1: tensor<16x64xf32>, %input2: tensor<17x32x64xf32>,
    %init2: tensor<17x64xf32>)  -> (tensor<16x64xf32>, tensor<17x64xf32>) {
  // expected-error @+1{{'linalg.reduce' op expects all inputs to have the same shapes. Shape at input-index 1 is not equal to the shape at input-index 0.}}
  %reduce, %reduce2 = linalg.reduce
      ins(%input1, %input2 : tensor<16x32x64xf32>, tensor<17x32x64xf32>)
      outs(%init1, %init2 : tensor<16x64xf32>, tensor<17x64xf32>)
      dimensions = [1]
      (%in1: f32, %in2: f32, %out1: f32, %out2: f32) {
        %0 = arith.addf %in1, %out1: f32
        %1 = arith.addf %in2, %out2: f32
        linalg.yield %0, %1: f32, f32
      }
  func.return %reduce, %reduce2 : tensor<16x64xf32>, tensor<17x64xf32>
}

// -----

// Check that all reduce outputs must share one shape (output index 1 is
// 17x64 while output index 0 is 16x64).
func.func @reduce_different_output_shapes(%input1: tensor<16x32x64xf32>,
    %init1: tensor<16x64xf32>, %input2: tensor<16x32x64xf32>,
    %init2: tensor<17x64xf32>)  -> (tensor<16x64xf32>, tensor<17x64xf32>) {
  // expected-error @+1{{'linalg.reduce' op expects all outputs to have the same shapes. Shape at output-index 1 is not equal to the shape at output-index 0.}}
  %reduce, %reduce2 = linalg.reduce
      ins(%input1, %input2 : tensor<16x32x64xf32>, tensor<16x32x64xf32>)
      outs(%init1, %init2 : tensor<16x64xf32>, tensor<17x64xf32>)
      dimensions = [1]
      (%in1: f32, %in2: f32, %out1: f32, %out2: f32) {
        %0 = arith.addf %in1, %out1: f32
        %1 = arith.addf %in2, %out2: f32
        linalg.yield %0, %1: f32, f32
      }
  func.return %reduce, %reduce2 : tensor<16x64xf32>, tensor<17x64xf32>
}
937
938// -----
939
// Check that linalg.transpose rejects a non-permutation array ([1, 1, 2]
// repeats an index).
func.func @transpose_invalid_permutation(%input: tensor<16x32x64xf32>,
    %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
  // expected-error @+1 {{'linalg.transpose' op permutation is not valid}}
  %transpose = linalg.transpose
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<32x64x16xf32>)
      permutation = [1, 1, 2]
  func.return %transpose : tensor<32x64x16xf32>
}

// -----

// Check that each result dim must equal the permuted input dim (identity
// permutation here, but init shape is rotated relative to the input).
func.func @transpose_permutated_dims_mismatch(%input: tensor<16x32x64xf32>,
    %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
  // expected-error @+1 {{'linalg.transpose' op dim(result, 0) = 32 doesn't match dim(input, permutation[0]) = 16}}
  %transpose = linalg.transpose
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<32x64x16xf32>)
      permutation = [0, 1, 2]
  func.return %transpose : tensor<32x64x16xf32>
}

// -----

// Check that the permutation length must equal the operand rank
// (2 entries for rank-3 operands).
func.func @transpose_rank_permutation_size_mismatch(
    %input: tensor<16x32x64xf32>,
    %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
  // expected-error @+1 {{'linalg.transpose' op size of permutation 2 does not match the argument rank 3}}
  %transpose = linalg.transpose
      ins(%input:tensor<16x32x64xf32>)
      outs(%init:tensor<32x64x16xf32>)
      permutation = [1, 0]
  func.return %transpose : tensor<32x64x16xf32>
}

// -----

// Check that input and init ranks must match (rank 2 vs rank 3).
func.func @transpose_input_init_rank_mismatch(%input: tensor<16x32xf32>,
    %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
  // expected-error @+1 {{'linalg.transpose' op input rank 2 does not match init rank 3}}
  %transpose = linalg.transpose
      ins(%input:tensor<16x32xf32>)
      outs(%init:tensor<32x64x16xf32>)
      permutation = [1, 0, 2]
  func.return %transpose : tensor<32x64x16xf32>
}
986
987// -----
988
// Check that the custom parser rejects linalg.transpose with no ins/outs.
func.func @transpose_no_operands1() {
  // expected-error @+1 {{'linalg.transpose' op expected 2 operands, but found 0}}
  linalg.transpose permutation = [1, 0, 2]
}

// -----

// Same check through the generic (quoted) op form, bypassing the custom
// parser so the verifier itself reports the missing operands.
func.func @transpose_no_operands2() {
  // expected-error @+1 {{'linalg.transpose' op expected 2 operands, but found 0}}
  "linalg.transpose"() <{permutation = array<i64: 1, 0, 2>}> ({
    ^bb0:
  }) : () -> ()
}

// -----

// Check that binding a name to the zero-result form fails at parse time.
func.func @transpose_no_operands3() -> tensor<32x64x16xf32> {
  // expected-error @+1 {{cannot name an operation with no results}}
  %transpose = linalg.transpose permutation = [1, 0, 2]
  func.return %transpose : tensor<32x64x16xf32>
}
1010
1011// -----
1012
// Check that input rank + number of added dimensions must equal init rank
// (2 + 2 != 3 here).
func.func @broadcast_input_dims_rank_mismatch(
    %input: tensor<4x16xf32>, %init: tensor<4x8x16xf32>)
    -> tensor<4x8x16xf32> {
  // expected-error @+1 {{'linalg.broadcast' op input rank plus added dimensions does not match init rank. }}
  %bcast = linalg.broadcast
      ins(%input:tensor<4x16xf32>)
      outs(%init:tensor<4x8x16xf32>)
      dimensions = [1, 2]
  func.return %bcast : tensor<4x8x16xf32>
}

// -----

// NOTE(review): the name suggests an ordering check, but the diagnostic
// exercised is the out-of-range check (dimension 5 vs valid range [0, 2]);
// consider renaming this test.
func.func @broadcast_unsorted_dims(
    %input: tensor<4x16xf32>, %init: tensor<4x8x16xf32>)
    -> tensor<4x8x16xf32> {
  // expected-error @+1 {{'linalg.broadcast' op dimension 0 is out of range. expected range: [0, 2], got: 5}}
  %bcast = linalg.broadcast
      ins(%input:tensor<4x16xf32>)
      outs(%init:tensor<4x8x16xf32>)
      dimensions = [5]
  func.return %bcast : tensor<4x8x16xf32>
}

// -----

// Check that every non-broadcast input dim must match the mapped init dim
// (input dim 0 is 4, init dim 0 is 5).
func.func @broadcast_mapped_dim_mismatch(
    %input: tensor<4x16xf32>, %init: tensor<5x8x16xf32>)
    -> tensor<5x8x16xf32> {
  // expected-error @+1 {{'linalg.broadcast' op input dim 0 should match init dim 0. input: 4, init: 5}}
  %bcast = linalg.broadcast
      ins(%input:tensor<4x16xf32>)
      outs(%init:tensor<5x8x16xf32>)
      dimensions = [1]
  func.return %bcast : tensor<5x8x16xf32>
}

// -----

// Check that numpy-style size-1 stretching is rejected: a unit input dim
// may not broadcast against a larger init dim.
func.func @broadcast_size_1_extension_not_supported(
    %input: tensor<1x16xf32>, %init: tensor<4x?x16xf32>)
    -> tensor<4x?x16xf32> {
  // expected-error @+1 {{'linalg.broadcast' op input dim 0 should match init dim 0. input: 1, init: 4}}
  %bcast = linalg.broadcast
      ins(%input:tensor<1x16xf32>)
      outs(%init:tensor<4x?x16xf32>)
      dimensions = [1]
  func.return %bcast : tensor<4x?x16xf32>
}
1062
1063// -----
1064
// Check that the custom parser rejects linalg.broadcast with no ins/outs.
func.func @broadcast_no_operands1() {
  // expected-error @+1 {{'linalg.broadcast' op expected 2 operands, but found 0}}
  linalg.broadcast dimensions = [1]
}

// -----

// Same check through the generic (quoted) op form, bypassing the custom
// parser so the verifier itself reports the missing operands.
func.func @broadcast_no_operands2() {
  // expected-error @+1 {{'linalg.broadcast' op expected 2 operands, but found 0}}
  "linalg.broadcast"() <{dimensions = array<i64: 1>}> ({
    ^bb0:
  }) : () -> ()
}
1078
1079// -----
1080
// Check that binding a name to the zero-result form fails at parse time.
// Note: the func.return type previously said tensor<32x64x16xf32>, a
// copy-paste leftover from the transpose variant of this test; the line is
// never reached (parsing aborts at the diagnostic) but it now matches the
// declared result type.
func.func @broadcast_no_operands3()
    -> tensor<4x?x16xf32> {
  // expected-error @+1 {{cannot name an operation with no results}}
  %broadcast = linalg.broadcast dimensions = [1]
  func.return %broadcast : tensor<4x?x16xf32>
}
1087
1088// -----
1089
// Check that linalg.generic's parser requires the iterator_types attribute.
func.func @missing_iterator_types() {
  // expected-error @below {{expected "iterator_types" array attribute}}
  linalg.generic {} ins() outs()
  return
}

// -----

// Check that linalg.softmax rejects an output whose shape does not match
// the input shape (rank 2 vs rank 3).
func.func @illegal_softmax_output_shape(%arg0: tensor<2x16x32xf32>) -> tensor<2x16xf32> {
  %0 = tensor.empty() : tensor<2x16xf32>
  // expected-error @+1 {{incompatible output shape}}
  %1 = linalg.softmax dimension(2) ins(%arg0 : tensor<2x16x32xf32>)
                                   outs(%0: tensor<2x16xf32>)
    -> tensor<2x16xf32>
  return %1 : tensor<2x16xf32>
}
1106
1107// -----
1108
// Check that linalg.mmt4d rejects an output whose inner-tile dims are
// inconsistent with the shapes inferred from the inputs.
func.func @mmt4d_dims_mismatch(%A: tensor<16x16x8x1xf32>,
                               %B: tensor<16x16x8x1xf32>,
                               %C_in: tensor<16x16x8x1xf32>) -> tensor<16x16x8x1xf32> {
    // expected-error @+1 {{inferred input/output operand #2 has shape's dimension #3 to be 8, but found 1}}
    %res = linalg.mmt4d
                     ins(%A, %B: tensor<16x16x8x1xf32>, tensor<16x16x8x1xf32>)
                     outs(%C_in: tensor<16x16x8x1xf32>)
                     -> tensor<16x16x8x1xf32>
    return %res : tensor<16x16x8x1xf32>
}

// -----

// Check that linalg.mmt4d rejects an output of the wrong rank (the output
// indexing map requires rank 4, not 2).
func.func @mmt4d_rank_mismatch(%A: tensor<16x16x8x1xf32>,
                 %B: tensor<16x16x8x1xf32>,
                 %C_in: tensor<8x8xf32>) -> tensor<8x8xf32> {
    // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #2 (4)}}
    %res = linalg.mmt4d
                     ins(%A, %B: tensor<16x16x8x1xf32>, tensor<16x16x8x1xf32>)
                     outs(%C_in: tensor<8x8xf32>)
                     -> tensor<8x8xf32>
    return %res : tensor<8x8xf32>
}

// -----

// Check that mixing tensor ins with memref outs is rejected: an op must
// have pure tensor or pure buffer semantics.
func.func @mixed_semantics(%a: tensor<?x?xf32>, %b: tensor<?x?xf32>, %c: memref<?x?xf32>) {
  // expected-error @+1 {{expected to have pure tensor or buffer semantics}}
  linalg.matmul ins(%a, %b: tensor<?x?xf32>, tensor<?x?xf32>)
               outs(%c: memref<?x?xf32>)
  return
}
1141
1142// -----
1143
// Check that the filter height must be r (= 3) or 1; here it is 4.
func.func @winograd_filter_transform_height(%arg0: tensor<2x4x3x5xf32>, %arg1: tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32> {
  // expected-error @+1 {{expect filter height either equals to r or 1}}
  %0 = linalg.winograd_filter_transform m(4) r(3) ins(%arg0 : tensor<2x4x3x5xf32>) outs(%arg1 : tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32>
  return %0 : tensor<6x6x5x2xf32>
}

// -----

// Check that the filter width must be r (= 3) or 1; here it is 4.
func.func @winograd_filter_transform_width(%arg0: tensor<2x3x4x5xf32>, %arg1: tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32> {
  // expected-error @+1 {{expect filter width either equals to r or 1}}
  %0 = linalg.winograd_filter_transform m(4) r(3) ins(%arg0 : tensor<2x3x4x5xf32>) outs(%arg1 : tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32>
  return %0 : tensor<6x6x5x2xf32>
}

// -----

// Check that a 1x1 filter is rejected: at least one of height/width must
// equal r.
func.func @winograd_filter_transform(%arg0: tensor<2x1x1x5xf32>, %arg1: tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32> {
  // expected-error @+1 {{expect either filter height or width equals to r}}
  %0 = linalg.winograd_filter_transform m(4) r(3) ins(%arg0 : tensor<2x1x1x5xf32>) outs(%arg1 : tensor<6x6x5x2xf32>) -> tensor<6x6x5x2xf32>
  return %0 : tensor<6x6x5x2xf32>
}

// -----

// Check output-shape verification with dynamic dims: the leading tile
// dims must be m+r-1 = 6, but the output's second dim is 5.
func.func @winograd_filter_dyn(%arg0: tensor<?x3x3x?xf32>, %arg1: tensor<6x5x?x?xf32>) -> tensor<6x5x?x?xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_filter_transform m(4) r(3) ins(%arg0 : tensor<?x3x3x?xf32>) outs(%arg1 : tensor<6x5x?x?xf32>) -> tensor<6x5x?x?xf32>
  return %0 : tensor<6x5x?x?xf32>
}
1173
1174// -----
1175
// Check that an input height inconsistent with the tiled output shape is
// rejected (13 instead of 14 for m=4, r=3, 3 tiles).
func.func @winograd_input_transform_height(%arg0: tensor<2x13x14x5xf32>, %arg1: tensor<6x6x3x3x2x5xf32>) -> tensor<6x6x3x3x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x13x14x5xf32>) outs(%arg1 : tensor<6x6x3x3x2x5xf32>) -> tensor<6x6x3x3x2x5xf32>
  return %0 : tensor<6x6x3x3x2x5xf32>
}

// -----

// Same as above but with the inconsistent size on the input width.
func.func @winograd_input_transform_width(%arg0: tensor<2x14x13x5xf32>, %arg1: tensor<6x6x3x3x2x5xf32>) -> tensor<6x6x3x3x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x14x13x5xf32>) outs(%arg1 : tensor<6x6x3x3x2x5xf32>) -> tensor<6x6x3x3x2x5xf32>
  return %0 : tensor<6x6x3x3x2x5xf32>
}

// -----

// Check that a wrong tile count along H in the output is rejected
// (2 instead of 3).
func.func @winograd_input_transform_output_tileH(%arg0: tensor<2x14x14x5xf32>, %arg1: tensor<6x6x2x3x2x5xf32>) -> tensor<6x6x2x3x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x14x14x5xf32>) outs(%arg1 : tensor<6x6x2x3x2x5xf32>) -> tensor<6x6x2x3x2x5xf32>
  return %0 : tensor<6x6x2x3x2x5xf32>
}

// -----

// Check that a wrong tile count along W in the output is rejected
// (2 instead of 3).
func.func @winograd_input_transform_output_tileW(%arg0: tensor<2x14x14x5xf32>, %arg1: tensor<6x6x3x2x2x5xf32>) -> tensor<6x6x3x2x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x14x14x5xf32>) outs(%arg1 : tensor<6x6x3x2x2x5xf32>) -> tensor<6x6x3x2x2x5xf32>
  return %0 : tensor<6x6x3x2x2x5xf32>
}

// -----

// Check that a wrong input-tile height in the output is rejected
// (5 instead of m+r-1 = 6).
func.func @winograd_input_transform_output_height(%arg0: tensor<2x14x14x5xf32>, %arg1: tensor<5x6x3x3x2x5xf32>) -> tensor<5x6x3x3x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x14x14x5xf32>) outs(%arg1 : tensor<5x6x3x3x2x5xf32>) -> tensor<5x6x3x3x2x5xf32>
  return %0 : tensor<5x6x3x3x2x5xf32>
}

// -----

// Check that a wrong input-tile width in the output is rejected
// (5 instead of m+r-1 = 6).
func.func @winograd_input_transform_output_width(%arg0: tensor<2x14x14x5xf32>, %arg1: tensor<6x5x3x3x2x5xf32>) -> tensor<6x5x3x3x2x5xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<2x14x14x5xf32>) outs(%arg1 : tensor<6x5x3x3x2x5xf32>) -> tensor<6x5x3x3x2x5xf32>
  return %0 : tensor<6x5x3x3x2x5xf32>
}

// -----

// Check output-shape verification with fully dynamic input: the static
// tile dims must still be m+r-1 = 6.
func.func @winograd_input_dyn(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<6x5x?x?x?x?xf32>) -> tensor<6x5x?x?x?x?xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_input_transform m(4) r(3) ins(%arg0 : tensor<?x?x?x?xf32>) outs(%arg1 : tensor<6x5x?x?x?x?xf32>) -> tensor<6x5x?x?x?x?xf32>
  return %0 : tensor<6x5x?x?x?x?xf32>
}
1229
1230// -----
1231
// Check that the input-tile height must equal m+r-1 = 6; here it is 5.
func.func @winograd_output_transform_input_height(%arg0: tensor<5x6x3x3x2x2xf32>, %arg1: tensor<2x12x12x2xf32>) -> tensor<2x12x12x2xf32> {
  // expected-error @+1 {{expect input height equals to input tile size}}
  %0 = linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<5x6x3x3x2x2xf32>) outs(%arg1 : tensor<2x12x12x2xf32>) -> tensor<2x12x12x2xf32>
  return %0 : tensor<2x12x12x2xf32>
}

// -----

// Check that the input-tile width must equal m+r-1 = 6; here it is 5.
func.func @winograd_output_transform_input_width(%arg0: tensor<6x5x3x3x2x2xf32>, %arg1: tensor<2x12x12x2xf32>) -> tensor<2x12x12x2xf32> {
  // expected-error @+1 {{expect input width equals to input tile size}}
  %0 = linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<6x5x3x3x2x2xf32>) outs(%arg1 : tensor<2x12x12x2xf32>) -> tensor<2x12x12x2xf32>
  return %0 : tensor<2x12x12x2xf32>
}

// -----

// Check that the output height must be tiles * m = 12; here it is 11.
func.func @winograd_output_transform_output_height(%arg0: tensor<6x6x3x3x2x2xf32>, %arg1: tensor<2x11x12x2xf32>) -> tensor<2x11x12x2xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<6x6x3x3x2x2xf32>) outs(%arg1 : tensor<2x11x12x2xf32>) -> tensor<2x11x12x2xf32>
  return %0 : tensor<2x11x12x2xf32>
}

// -----

// Check that the output width must be tiles * m = 12; here it is 11.
func.func @winograd_output_transform_output_width(%arg0: tensor<6x6x3x3x2x2xf32>, %arg1: tensor<2x12x11x2xf32>) -> tensor<2x12x11x2xf32> {
  // expected-error @+1 {{the output shape is not expected}}
  %0 = linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<6x6x3x3x2x2xf32>) outs(%arg1 : tensor<2x12x11x2xf32>) -> tensor<2x12x11x2xf32>
  return %0 : tensor<2x12x11x2xf32>
}
1261