// RUN: mlir-opt %s -split-input-file -verify-diagnostics

// Asking the dimension of a 0-D shape doesn't make sense.
func.func @dim_0_ranked(%arg : tensor<f32>, %arg1 : index) {
  tensor.dim %arg, %arg1 : tensor<f32> // expected-error {{'tensor.dim' op operand #0 must be non-0-ranked or unranked tensor, but got 'tensor<f32>'}}
  return
}

// -----

func.func @tensor.cast_mismatching_constants(%arg0: tensor<1xf32>) {
  // expected-error@+1 {{operand type 'tensor<1xf32>' and result type 'tensor<2xf32>' are cast incompatible}}
  %0 = tensor.cast %arg0 : tensor<1xf32> to tensor<2xf32>
  return
}

// -----

func.func @concat_empty() {
  // expected-error@+1 {{requires at least one input}}
  %0 = tensor.concat dim(0) : () -> tensor<1x2x3xf32>
  return
}

// -----

func.func @concat_rank_mismatch(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) {
  // expected-error@+1 {{rank of concatenated inputs must match result rank}}
  %0 = tensor.concat dim(0) %arg0, %arg1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
  return
}

// -----

func.func @concat_dim_out_of_range(%arg0: tensor<3xf32>) {
  // expected-error@+1 {{concatenation dim must be less than the tensor rank}}
  %0 = tensor.concat dim(1) %arg0 : (tensor<3xf32>) -> tensor<3xf32>
  return
}

// -----

func.func @concat_element_type_mismatch(%arg0: tensor<3xf32>, %arg1: tensor<3xi32>) {
  // expected-error@+1 {{inputs and result element type must match}}
  %0 = tensor.concat dim(0) %arg0, %arg1 : (tensor<3xf32>, tensor<3xi32>) -> tensor<3xf32>
  return
}

// -----

func.func @concat_incompatible_input_types(%arg0: tensor<3x4xf32>, %arg1: tensor<4x5xf32>) {
  // expected-error@+1 {{static concatenation size mismatch along non-concatenated dimension 1}}
  %0 = tensor.concat dim(0) %arg0, %arg1 : (tensor<3x4xf32>, tensor<4x5xf32>) -> tensor<7x5xf32>
  return
}

// -----

func.func @concat_static_shape_mismatch(%arg0: tensor<3xf32>) {
  // expected-error@+1 {{result type 'tensor<7xf32>'does not match inferred shape 'tensor<6xf32>' static sizes}}
  %0 = tensor.concat dim(0) %arg0, %arg0 : (tensor<3xf32>, tensor<3xf32>) -> tensor<7xf32>
  return
}

// -----

func.func @extract_too_many_indices(%arg0: tensor<?xf32>) {
  // expected-error@+1 {{incorrect number of indices for extract_element}}
  %0 = tensor.extract %arg0[] : tensor<?xf32>
  return
}

// -----

func.func @insert_too_many_indices(%arg0: f32, %arg1: tensor<?xf32>) {
  // expected-error@+1 {{incorrect number of indices}}
  %0 = tensor.insert %arg0 into %arg1[] : tensor<?xf32>
  return
}

// -----

func.func @tensor.from_elements_wrong_result_type() {
  // expected-error@+2 {{'tensor.from_elements' invalid kind of type specified}}
  %c0 = arith.constant 0 : i32
  %0 = tensor.from_elements %c0 : tensor<*xi32>
  return
}

// -----

func.func @tensor.from_elements_wrong_elements_count() {
  // expected-error@+2 {{number of operands and types do not match: got 1 operands and 2 types}}
  %c0 = arith.constant 0 : index
  %0 = tensor.from_elements %c0 : tensor<2xindex>
  return
}

// -----

func.func @tensor.generate(%m : index)
    -> tensor<?x3x?xf32> {
  // expected-error @+1 {{must have as many index operands as dynamic extents in the result type}}
  %tnsr = tensor.generate %m {
    ^bb0(%i : index, %j : index, %k : index):
      %elem = arith.constant 8.0 : f32
      tensor.yield %elem : f32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

func.func @tensor.generate(%m : index, %n : index)
    -> tensor<?x3x?xf32> {
  // expected-error @+1 {{must have one body argument per input dimension}}
  %tnsr = tensor.generate %m, %n {
    ^bb0(%i : index, %j : index):
      %elem = arith.constant 8.0 : f32
      tensor.yield %elem : f32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

func.func @tensor.generate(%m : index, %n : index)
    -> tensor<?x3x?xf32> {
  // expected-error @+1 {{all body arguments must be index}}
  %tnsr = tensor.generate %m, %n {
    ^bb0(%i : index, %j : index, %k : i64):
      %elem = arith.constant 8.0 : f32
      tensor.yield %elem : f32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

func.func @tensor.generate(%m : index, %n : index)
    -> tensor<?x3x?xf32> {
  // expected-error @+4 {{'func.return' op expects parent op 'func.func'}}
  %tnsr = tensor.generate %m, %n {
    ^bb0(%i : index, %j : index, %k : index):
      %elem = arith.constant 8.0 : f32
      func.return %elem : f32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

func.func @tensor.generate(%m : index, %n : index)
    -> tensor<?x3x?xf32> {
  // expected-error @+1 {{body must be terminated with a `yield` operation of the tensor element type}}
  %tnsr = tensor.generate %m, %n {
    ^bb0(%i : index, %j : index, %k : index):
      %elem = arith.constant 8 : i32
      tensor.yield %elem : i32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

func.func @tensor.reshape_element_type_mismatch(
       %buf: tensor<*xf32>, %shape: tensor<1xi32>) {
  // expected-error @+1 {{element types of source and destination tensor types should be the same}}
  tensor.reshape %buf(%shape) : (tensor<*xf32>, tensor<1xi32>) -> tensor<?xi32>
}

// -----

func.func @tensor.reshape_dst_ranked_shape_unranked(
       %buf: tensor<*xf32>, %shape: tensor<?xi32>) {
  // expected-error @+1 {{cannot use shape operand with dynamic length to reshape to statically-ranked tensor type}}
  tensor.reshape %buf(%shape) : (tensor<*xf32>, tensor<?xi32>) -> tensor<?xf32>
}

// -----

func.func @tensor.reshape_dst_shape_rank_mismatch(
       %buf: tensor<*xf32>, %shape: tensor<1xi32>) {
  // expected-error @+1 {{length of shape operand differs from the result's tensor rank}}
  tensor.reshape %buf(%shape)
    : (tensor<*xf32>, tensor<1xi32>) -> tensor<?x?xf32>
}

// -----

func.func @tensor.reshape_num_elements_mismatch(
       %buf: tensor<1xf32>, %shape: tensor<1xi32>) {
  // expected-error @+1 {{source and destination tensor should have the same number of elements}}
  tensor.reshape %buf(%shape)
    : (tensor<1xf32>, tensor<1xi32>) -> tensor<10xf32>
}

// -----

func.func @extract_slice_wrong_result_rank(%t: tensor<?xf32>, %idx : index) {
  // expected-error @+1 {{expected rank to be smaller or equal to the other rank.}}
  %0 = tensor.extract_slice %t[0][4][1] : tensor<?xf32> to tensor<?x?xf32>
  return
}

// -----

func.func @extract_slice_wrong_result_rank(%t: tensor<?xf32>, %idx : index) {
  // expected-error @+1 {{expected element type to be 'f32'}}
  %0 = tensor.extract_slice %t[0][4][1] : tensor<?xf32> to tensor<4xi8>
  return
}


// -----

func.func @extract_slice_size_and_output_dim_mismatch_static_size(%t: tensor<16xf32>) {
  // expected-error @+1 {{expected type to be 'tensor<4xf32>' or a rank-reduced version. (size mismatch)}}
  %0 = tensor.extract_slice %t[0][4][1]
    : tensor<16xf32> to tensor<6xf32>
  return
}

// -----

func.func @extract_slice_size_and_output_dim_mismatch_dynamic_size(%t: tensor<?xf32>, %idx : index) {
  // expected-error @+2 {{expected type to be 'tensor<?xf32>' or a rank-reduced version. (size mismatch)}}
  %c4 = arith.constant 4 : index
  %0 = tensor.extract_slice %t[0][%c4][1] : tensor<?xf32> to tensor<4xi8>
  return
}

// -----

func.func @extract_slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index) {
  // expected-error @+1 {{expected type to be 'tensor<?x4x4xf32>' or a rank-reduced version. (size mismatch)}}
  %0 = tensor.extract_slice %t[0, 0, 0][%idx, 4, 4][1, 1, 1]
    : tensor<8x16x4xf32> to tensor<4x4x4xf32>
  return
}

// -----

func.func @extract_slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index) {
  // expected-error @+1 {{expected type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (size mismatch)}}
  %0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1]
    : tensor<8x16x4xf32> to tensor<?x4x4xf32>
  return
}

// -----

func.func @illegal_num_offsets(%arg0 : tensor<?x?x?xf32>, %arg1 : index, %arg2 : index) {
  // expected-error@+1 {{expected 3 offset values}}
  %0 = tensor.extract_slice %arg0[0, 0] [%arg1, %arg2] [1, 1] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
  return
}

// -----

func.func @insert_slice_wrong_result_rank(%t1: tensor<?xf32>, %t2: tensor<?x?xf32>, %idx : index) {
  // expected-error @+1 {{expected rank to be smaller or equal to the other rank.}}
  %0 = tensor.insert_slice %t2 into %t1[0][4][1] : tensor<?x?xf32> into tensor<?xf32>

  return
}

// -----

func.func @insert_slice_wrong_result_rank(%t1: tensor<4xi8>, %t2: tensor<?xf32>, %idx : index) {
  // expected-error @+1 {{expected element type to be 'f32'}}
  %0 = tensor.insert_slice %t1 into %t2[0][4][1] : tensor<4xi8> into tensor<?xf32>

  return
}

// -----

func.func @insert_slice_wrong_static_type(%t1: tensor<4x4x4xf32>, %t2: tensor<8x16x4xf32>, %idx : index) {
  // expected-error @+1 {{expected type to be 'tensor<?x4x4xf32>' or a rank-reduced version. (size mismatch)}}
  %0 = tensor.insert_slice %t1 into %t2[0, 0, 0][%idx, 4, 4][1, 1, 1]
    : tensor<4x4x4xf32> into tensor<8x16x4xf32>

  return
}

// -----

func.func @insert_slice_wrong_dynamic_type(%t1: tensor<?x4x4xf32>, %t2: tensor<8x16x4xf32>, %idx : index) {
  // expected-error @+1 {{expected type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (size mismatch)}}
  %0 = tensor.insert_slice %t1 into %t2[0, 2, 0][4, 4, 4][1, 1, 1]
    : tensor<?x4x4xf32> into tensor<8x16x4xf32>

  return
}

// -----

func.func @illegal_expanding_reshape_static_tensor
    (%arg0: tensor<2x3x20xf32>) -> tensor<2x3x2x4x5xf32> {
  // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
  %0 = tensor.expand_shape %arg0 [[0], [1], [2, 3, 4]] output_shape [2, 3, 2, 4, 5]
      : tensor<2x3x20xf32> into tensor<2x3x2x4x5xf32>
  return %0 : tensor<2x3x2x4x5xf32>
}

// -----

func.func @illegal_collapsing_reshape_static_tensor
    (%arg0: tensor<2x3x2x4x5xf32>) -> tensor<2x3x20xf32> {
  // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
  %0 = tensor.collapse_shape %arg0 [[0], [1], [2, 3, 4]]
      : tensor<2x3x2x4x5xf32> into tensor<2x3x20xf32>
  return %0 : tensor<2x3x20xf32>
}

// -----

func.func @illegal_expanding_reshape_mixed_tensor(%arg0 : tensor<?x?xf32>, %sz0: index)
    -> tensor<?x4x5xf32> {
  // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
  %0 = tensor.expand_shape %arg0 [[0, 1], [2]] output_shape [%sz0, 4, 5]
      : tensor<?x?xf32> into tensor<?x4x5xf32>
  return %0 : tensor<?x4x5xf32>
}

// -----

func.func @illegal_expanding_reshape_mixed_tensor_2(%arg0 : tensor<?x?xf32>, %sz0: index)
    -> tensor<?x4x5xf32> {
  // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
  %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%sz0, 4, 5]
      : tensor<?x?xf32> into tensor<?x4x5xf32>
  return %0 : tensor<?x4x5xf32>
}

// -----

func.func @expand_shape_illegal_output_shape(%arg0: tensor<2xf32>) {
  // expected-error @+1 {{expected number of static shape dims to be equal to the output rank (3) but found 2 inputs instead}}
  %0 = tensor.expand_shape %arg0 [[0, 1, 2]] output_shape [1, 2] : tensor<2xf32> into tensor<1x1x2xf32>
  return
}


// -----

func.func @illegal_collapsing_reshape_mixed_tensor(%arg0 : tensor<?x4x5xf32>) -> tensor<?x?xf32> {
  // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
  %0 = tensor.collapse_shape %arg0 [[0, 1], [2]]
      : tensor<?x4x5xf32> into tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

func.func @illegal_collapsing_reshape_mixed_tensor_2(%arg0 : tensor<?x4x5xf32>)
    -> tensor<?x?xf32> {
  // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
  %0 = tensor.collapse_shape %arg0 [[0], [1, 2]]
      : tensor<?x4x5xf32> into tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

func.func @rank(%0: f32) {
  // expected-error@+1 {{'tensor.rank' op operand #0 must be tensor of any type values}}
  "tensor.rank"(%0): (f32)->index
  return
}

// -----

func.func @illegal_num_offsets(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?x?xf32>,
    %arg2 : index, %arg3 : index) {
  // expected-error@+1 {{expected 3 offset values}}
  %0 = tensor.insert_slice %arg0 into %arg1[0, 0] [%arg2, %arg3] [1, 1] : tensor<?x?xf32> into tensor<?x?x?xf32>
  return
}

// -----


func.func @pad_result_type(%arg0: tensor<?x2x3x4xi32>, %arg1: index, %arg2: i32) -> tensor<?x?x?x8xf32> {
  // expected-error @+1 {{specified type 'tensor<?x?x?x8xf32>' does not match the inferred type 'tensor<?x?x?x9xi32>}}
  %0 = tensor.pad %arg0 low[1, %arg1, 2, 2] high[1, 2, %arg1, 3] {
  ^bb0(%arg3: index, %arg4: index):
    tensor.yield %arg2 : i32
  } : tensor<?x2x3x4xi32> to tensor<?x?x?x8xf32>
  return %0 : tensor<?x?x?x8xf32>
}

// -----

func.func @pad_number_of_block_args(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
  // expected-error @+1 {{expected the block to have 2 arguments}}
  %0 = tensor.pad %arg0 low[1, 2] high[2, 3] {
  ^bb0(%arg2: index, %arg3: index, %arg4: index):
    tensor.yield %arg1 : i32
  } : tensor<?x4xi32> to tensor<?x9xi32>
  return %0 : tensor<?x9xi32>
}

// -----

func.func @pad_block_args(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
  // expected-error @+1 {{op expected block argument 1 to be an index}}
  %0 = tensor.pad %arg0 low[1, 2] high[2, 3] {
  ^bb0(%arg2: i32, %arg3: i32):
    tensor.yield %arg1 : i32
  } : tensor<?x4xi32> to tensor<?x9xi32>
  return %0 : tensor<?x9xi32>
}

// -----

func.func @pad_yield_type(%arg0: tensor<?x4xi32>, %arg1: i8) -> tensor<?x9xi32> {
  // expected-error @+1 {{op expected yield type to match shape element type}}
  %0 = tensor.pad %arg0 low[1, 2] high[2, 3] {
  ^bb0(%arg2: index, %arg3: index):
    tensor.yield %arg1 : i8
  } : tensor<?x4xi32> to tensor<?x9xi32>
  return %0 : tensor<?x9xi32>
}

// -----

func.func @invalid_splat(%v : f32) {
  // expected-error@+1 {{invalid kind of type specified}}
  tensor.splat %v : memref<8xf32>
  return
}

// -----

func.func @invalid_splat(%v : vector<8xf32>) {
  // expected-error@+1 {{must be integer/index/float type}}
  %w = tensor.splat %v : tensor<8xvector<8xf32>>
  return
}

// -----

func.func @invalid_splat(%v: f32, %m: index) {
  // expected-error@+1 {{incorrect number of dynamic sizes, has 1, expected 2}}
  %w = tensor.splat %v[%m] : tensor<?x8x?xf32>
  return
}

// -----

func.func @gather_empty_dims(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x3xindex>) {
  // expected-error@+1 {{gather_dims must be non-empty}}
  %out = tensor.gather %source[%indices] gather_dims([]):
    (tensor<4x5x6xf32>, tensor<1x2x3xindex>) -> tensor<1x2xf32>
  return
}

// -----

func.func @gather_coordinate_rank_overflow(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x3xindex>) {
  // expected-error@+1 {{gather_dims overflow source rank}}
  %out = tensor.gather %source[%indices] gather_dims([0, 1, 2, 3]):
    (tensor<4x5x6xf32>, tensor<1x2x3xindex>) -> tensor<1x2xf32>
  return
}

// -----

func.func @gather_coordinate_rank_mismatch0(
    %source: tensor<4x5x6xf32>, %indices: tensor<index>) {
  // expected-error@+1 {{gather_dims length must match the size of last dimension of indices}}
  %out = tensor.gather %source[%indices] gather_dims([0, 1, 2]):
    (tensor<4x5x6xf32>, tensor<index>) -> tensor<1x2xf32>
}

// -----

func.func @gather_coordinate_rank_mismatch1(
    %source: tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{gather_dims length must match the size of last dimension of indices}}
  %out = tensor.gather %source[%indices] gather_dims([0, 1, 2]):
    (tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2xf32>
}

// -----

func.func @gather_coordinate_negative(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x1xindex>) {
  // expected-error@+1 {{gather_dims value must be non-negative}}
  %out = tensor.gather %source[%indices] gather_dims([-1]):
    (tensor<4x5x6xf32>, tensor<1x2x1xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @gather_coordinate_overflow(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x1xindex>) {
  // expected-error@+1 {{gather_dims value must be smaller than source rank}}
  %out = tensor.gather %source[%indices] gather_dims([42]):
    (tensor<4x5x6xf32>, tensor<1x2x1xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @gather_coordinate_increase(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{gather_dims values must be strictly increasing}}
  %out = tensor.gather %source[%indices] gather_dims([1, 0]):
    (tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2x1x1xf32>
  return
}

// -----

func.func @gather_wrong_result_type(
    %source : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{result type mismatch: expected 'tensor<1x2x1x5x1xf32>' or its rank-reduced variant 'tensor<1x2x5xf32>' (got: 'tensor<1x2x1xf32>')}}
  %out = tensor.gather %source[%indices] gather_dims([0, 2]):
    (tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @scatter_empty_dims(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x3xindex>) {
  // expected-error@+1 {{scatter_dims must be non-empty}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x3xindex>) -> tensor<1x2xf32>
  return
}

// -----

func.func @scatter_coordinate_rank_overflow(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x3xindex>) {
  // expected-error@+1 {{scatter_dims overflow dest rank}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([0, 1, 2, 3]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x3xindex>) -> tensor<1x2xf32>
  return
}

// -----

func.func @scatter_coordinate_rank_mismatch0(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<index>) {
  // expected-error@+1 {{scatter_dims length must match the size of last dimension of indices}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([0, 1, 2]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<index>) -> tensor<1x2xf32>
  return
}

// -----

func.func @scatter_coordinate_rank_mismatch1(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{scatter_dims length must match the size of last dimension of indices}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([0, 1, 2]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2xf32>
  return
}

// -----

func.func @scatter_coordinate_negative(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x1xindex>) {
  // expected-error@+1 {{scatter_dims value must be non-negative}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([-1]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x1xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @scatter_coordinate_overflow(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x1xindex>) {
  // expected-error@+1 {{scatter_dims value must be smaller than dest rank}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([42]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x1xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @scatter_coordinate_increase(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{scatter_dims values must be strictly increasing}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([1, 0]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2x1x1xf32>
  return
}

// -----

func.func @scatter_missing_unique(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{requires 'unique' attribute to be set}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([0, 2]):
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @scatter_wrong_result_type(
    %source : tensor<f32>,
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x2x2xindex>) {
  // expected-error@+1 {{source type mismatch: expected 'tensor<1x2x1x5x1xf32>' or its rank-reduced variant 'tensor<1x2x5xf32>' (got: 'tensor<f32>')}}
  %out = tensor.scatter %source into %dest[%indices] scatter_dims([0, 2]) unique:
    (tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x2xindex>) -> tensor<1x2x1xf32>
  return
}

// -----

func.func @empty_wrong_number_of_operands(%sz : index) {
  // expected-error@+1 {{incorrect number of dynamic sizes, has 1, expected 2}}
  %out = tensor.empty(%sz) : tensor<2x?x?x5xf32>
  return
}

// -----

func.func @pack_invalid_no_padding_no_full_tiles(%input: tensor<256x128xf32>, %output: tensor<8x8x16x33xf32>) -> tensor<8x8x16x33xf32> {
  // expected-error@+1 {{invalid tile factor or output size provided. Only full tiles are supported when padding_value is not set}}
  %0 = tensor.pack %input inner_dims_pos = [1, 0] inner_tiles = [16, 33] into %output : tensor<256x128xf32>  -> tensor<8x8x16x33xf32>
  return %0 : tensor<8x8x16x33xf32>
}

// -----

func.func @pack_invalid_no_padding_no_full_tiles_dyn_tiles(%input: tensor<256x128xf32>, %output: tensor<10x8x?x?xf32>, %tile_size_0: index, %tile_size_1: index) -> tensor<10x8x?x?xf32> {
  // expected-error@+1 {{invalid tile factor or output size provided. Only full tiles are supported when padding_value is not set}}
  %0 = tensor.pack %input inner_dims_pos = [1, 0] inner_tiles = [%tile_size_0, %tile_size_1] into %output : tensor<256x128xf32>  -> tensor<10x8x?x?xf32>
  return %0 : tensor<10x8x?x?xf32>
}

// -----

func.func @pack_invalid_no_padding_no_full_tiles_dyn_tiles_outperm(%input: tensor<256x128xf32>, %output: tensor<8x10x?x?xf32>, %tile_size_0: index, %tile_size_1: index) -> tensor<8x10x?x?xf32> {
  // expected-error@+1 {{invalid tile factor or output size provided. Only full tiles are supported when padding_value is not set}}
  %0 = tensor.pack %input outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [%tile_size_0, %tile_size_1] into %output : tensor<256x128xf32>  -> tensor<8x10x?x?xf32>
  return %0 : tensor<8x10x?x?xf32>
}

// -----

func.func @pad_and_pack_invalid_type(%input: tensor<13x15xf32>, %output: tensor<2x8x8x2xf32>, %pad: i32) -> tensor<2x8x8x2xf32> {
  // expected-error@+1 {{expected padding_value has 'f32' but got: 'i32'}}
  %0 = tensor.pack %input padding_value(%pad: i32) inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %output : tensor<13x15xf32> -> tensor<2x8x8x2xf32>
  return %0 : tensor<2x8x8x2xf32>
}

// -----

func.func @pack_invalid_inner_dims_pos_vector(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{invalid inner_dims_pos vector}}
  %0 = tensor.pack %input inner_dims_pos = [2, 0] inner_tiles = [2, 2] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>
  return %0 : tensor<8x8x32x16xf32>
}

// -----

func.func @pack_invalid_duplicate_element_in_inner_dims(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{invalid inner_dims_pos vector}}
  %0 = tensor.pack %input inner_dims_pos = [1, 1] inner_tiles = [2, 2] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>
  return %0 : tensor<8x8x32x16xf32>
}

// -----

func.func @pack_invalid_duplicate_element_in_outer_perm(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{invalid outer_dims_perm vector}}
  %0 = tensor.pack %input outer_dims_perm = [1, 1] inner_dims_pos = [0, 1] inner_tiles = [2, 2] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>
  return %0 : tensor<8x8x32x16xf32>
}

// -----

func.func @pack_invalid_output_rank(%input: tensor<256x128xf32>, %output: tensor<64x32x16xf32>) -> tensor<64x32x16xf32> {
  // expected-error@+1 {{packed rank != (unpacked rank + num tiling factors), got 3 != 4}}
  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %output : tensor<256x128xf32> -> tensor<64x32x16xf32>
  return %0 : tensor<64x32x16xf32>
}

// -----

func.func @unpack_invalid_output_rank(%input: tensor<256x128xf32>, %output: tensor<64x32x16xf32>) -> tensor<256x128xf32> {
  // expected-error@+1 {{packed rank != (unpacked rank + num tiling factors), got 3 != 4}}
  %0 = tensor.unpack %output inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %input : tensor<64x32x16xf32> -> tensor<256x128xf32>
  return %0 : tensor<256x128xf32>
}

// -----

func.func @unpack_invalid_out_of_bound_outer_perm(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{invalid outer_dims_perm vector}}
  %0 = tensor.unpack %output outer_dims_perm = [2, 1] inner_dims_pos = [0, 1] inner_tiles = [2, 2] into %input : tensor<8x8x32x16xf32> -> tensor<256x128xf32>
  return %0 : tensor<256x128xf32>
}

// -----

func.func @pack_invalid_outer_dims_perm(%source: tensor<128x256xf32>, %dest: tensor<16x4x32x16xf32>) -> tensor<16x4x32x16xf32> {
  // expected-error@+1 {{outer_dims_perm must be a permutation or empty}}
  %0 = tensor.pack %source outer_dims_perm = [0] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<16x4x32x16xf32>
  return %0 : tensor<16x4x32x16xf32>
}

// -----

func.func @unpack_invalid_outer_dims_perm(%source: tensor<128x256xf32>, %dest: tensor<16x4x32x16xf32>) -> tensor<128x256xf32> {
  // expected-error@+1 {{outer_dims_perm must be a permutation or empty}}
  %0 = tensor.unpack %dest outer_dims_perm = [1] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %source : tensor<16x4x32x16xf32> -> tensor<128x256xf32>
  return %0 : tensor<128x256xf32>
}

// -----

func.func @pack_invalid(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<8x8x16x32xf32>', got 'tensor<8x8x32x16xf32>'}}
  %0 = tensor.pack %input inner_dims_pos = [1, 0] inner_tiles = [16, 32] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>
  return %0 : tensor<8x8x32x16xf32>
}

// -----

func.func @unpack_invalid(%output: tensor<256x128xf32>, %input: tensor<8x8x32x16xf32>) -> tensor<256x128xf32> {
  // expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<8x32x4x32xf32>', got 'tensor<8x8x32x16xf32>'}}
  %0 = tensor.unpack %input inner_dims_pos = [1, 0] inner_tiles = [4, 32] into %output : tensor<8x8x32x16xf32> -> tensor<256x128xf32>
  return %0 : tensor<256x128xf32>
}

// -----

func.func @pack_invalid(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
  // expected-error@+1 {{invalid zero tile factor}}
  %0 = tensor.pack %input inner_dims_pos = [1, 0] inner_tiles = [0, 2] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>
  return %0 : tensor<8x8x32x16xf32>
}

// -----
func.func @pack_mismatch_inner_tile_size_and_output_shape(
  %input : tensor<?x?xf32>, %output : tensor<?x?x8x8xf32>) -> tensor<?x?x8x8xf32> {
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
  return %0 : tensor<?x?x8x8xf32>
}

// -----

func.func @pack_dynamic_inner_tile_size_and_static_output_shape(
  %input : tensor<?x?xf32>, %output : tensor<?x?x8x8xf32>) -> tensor<?x?x8x8xf32> {
  %c8 = arith.constant 8 : index
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
  return %0 : tensor<?x?x8x8xf32>
}

// -----

func.func @pack_static_inner_tile_size_and_dynamic_output_shape(
  %input : tensor<?x?xf32>, %output : tensor<?x?x8x?xf32>) -> tensor<?x?x8x?xf32> {
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %output : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
  return %0 : tensor<?x?x8x?xf32>
}

// -----

func.func @unpack_mismatch_inner_tile_size_and_output_shape(
  %input : tensor<?x?x8x8xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x8x8xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

func.func @unpack_dynamic_inner_tile_size_and_static_output_shape(
  %input : tensor<?x?x8x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %c8 = arith.constant 8 : index
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [%c8, 4] into %output : tensor<?x?x8x4xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

func.func @unpack_static_inner_tile_size_and_dynamic_output_shape(
  %input : tensor<?x?x?x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x?x4xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

func.func @bitcast_index_0(%arg0 : tensor<?xi64>) -> tensor<?xindex> {
  // expected-error @+1 {{'tensor.bitcast' op result #0 must be tensor of signless integer or unsigned integer or signed integer or floating-point values, but got 'tensor<?xindex>'}}
  %0 = tensor.bitcast %arg0 : tensor<?xi64> to tensor<?xindex>
  return %0 : tensor<?xindex>
}

// -----

func.func @bitcast_index_1(%arg0 : tensor<?xindex>) -> tensor<?xi64> {
  // expected-error @+1 {{'tensor.bitcast' op operand #0 must be tensor of signless integer or unsigned integer or signed integer or floating-point values, but got 'tensor<?xindex>'}}
  %0 = tensor.bitcast %arg0 : tensor<?xindex> to tensor<?xi64>
  return %0 : tensor<?xi64>
}