// RUN: mlir-opt %s -split-input-file -verify-diagnostics
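// This file is a regression test for the sparse_tensor dialect verifiers:
// each case below is intentionally invalid IR, the expected-error annotation
// above an op gives the diagnostic that -verify-diagnostics must observe, and
// the `// -----` separators delimit independent split-input-file test cases.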

func.func @invalid_new_dense(%arg0: !llvm.ptr) -> tensor<32xf32> {
  // expected-error@+1 {{'sparse_tensor.new' op result #0 must be sparse tensor of any type values, but got 'tensor<32xf32>'}}
  %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor<32xf32>
  return %0 : tensor<32xf32>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), posWidth=32, crdWidth=32}>

func.func @non_static_pack_ret(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
                            -> tensor<?xf64, #SparseVector> {
  // expected-error@+1 {{the sparse-tensor must have static shape}}
  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
     : (tensor<2xi32>, tensor<6x1xi32>), tensor<6xf64> to tensor<?xf64, #SparseVector>
  return %0 : tensor<?xf64, #SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), posWidth=32, crdWidth=32}>

func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
                            -> tensor<100xf32, #SparseVector> {
  // expected-error@+1 {{input/output element-types don't match}}
  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
     : (tensor<2xi32>, tensor<6x1xi32>), tensor<6xf64> to tensor<100xf32, #SparseVector>
  return %0 : tensor<100xf32, #SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
                            -> tensor<100x2xf64, #SparseVector> {
  // expected-error@+1 {{input/output trailing COO level-ranks don't match}}
  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
     : (tensor<2xi32>, tensor<6x3xi32>), tensor<6xf64> to tensor<100x2xf64, #SparseVector>
  return %0 : tensor<100x2xf64, #SparseVector>
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth=32, crdWidth=32}>

func.func @invalid_pack_mis_position(%values: tensor<6xf64>, %coordinates: tensor<6xi32>)
                                     -> tensor<2x100xf64, #CSR> {
  // expected-error@+1 {{inconsistent number of fields between input/output}}
  %0 = sparse_tensor.assemble (%coordinates), %values
     : (tensor<6xi32>), tensor<6xf64> to tensor<2x100xf64, #CSR>
  return %0 : tensor<2x100xf64, #CSR>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), posWidth=32, crdWidth=32}>

func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>) {
  // expected-error@+1 {{input/output element-types don't match}}
  %rp, %rc, %rv, %pl, %cl, %vl = sparse_tensor.disassemble %sp : tensor<100xf32, #SparseVector>
                  out_lvls(%pos, %coordinates : tensor<2xi32>, tensor<6x1xi32>)
                  out_vals(%values : tensor<6xf64>)
                  -> (tensor<2xi32>, tensor<6x1xi32>), tensor<6xf64>, (index, index), index
  return
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
  // expected-error@+1 {{input/output trailing COO level-ranks don't match}}
  %rp, %rc, %rv, %pl, %cl, %vl = sparse_tensor.disassemble %sp : tensor<100x2xf64, #SparseVector>
                  out_lvls(%pos, %coordinates : tensor<2xi32>, tensor<6x3xi32> )
                  out_vals(%values : tensor<6xf64>)
                  -> (tensor<2xi32>, tensor<6x3xi32>), tensor<6xf64>, (index, index), index
  return
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth=32, crdWidth=32}>

func.func @invalid_unpack_mis_position(%sp: tensor<2x100xf64, #CSR>, %values: tensor<6xf64>, %coordinates: tensor<6xi32>) {
  // expected-error@+1 {{inconsistent number of fields between input/output}}
  %rc, %rv, %cl, %vl = sparse_tensor.disassemble %sp : tensor<2x100xf64, #CSR>
             out_lvls(%coordinates : tensor<6xi32>)
             out_vals(%values : tensor<6xf64>)
             -> (tensor<6xi32>), tensor<6xf64>, (index), index
  return
}

// -----

func.func @invalid_positions_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
  // expected-error@+1 {{'sparse_tensor.positions' op operand #0 must be sparse tensor of any type values, but got 'tensor<128xf64>'}}
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

func.func @invalid_positions_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
  // expected-error@+1 {{'sparse_tensor.positions' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}}
  %0 = "sparse_tensor.positions"(%arg0) { level = 0 : index } : (tensor<*xf64>) -> (memref<?xindex>)
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), posWidth=32}>

func.func @mismatch_positions_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  // expected-error@+1 {{unexpected type for positions}}
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @positions_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  // expected-error@+1 {{requested level is out of bounds}}
  %0 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

func.func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
  // expected-error@+1 {{'sparse_tensor.coordinates' op operand #0 must be sparse tensor of any type values, but got 'tensor<10x10xi32>'}}
  %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<10x10xi32> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

func.func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
  // expected-error@+1 {{'sparse_tensor.coordinates' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}}
  %0 = "sparse_tensor.coordinates"(%arg0) { level = 0 : index } : (tensor<*xf64>) -> (memref<?xindex>)
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
  // expected-error@+1 {{unexpected type for coordinates}}
  %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<?xf64, #SparseVector> to memref<?xi32>
  return %0 : memref<?xi32>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  // expected-error@+1 {{requested level is out of bounds}}
  %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

func.func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
  // expected-error@+1 {{'sparse_tensor.values' op operand #0 must be sparse tensor of any type values, but got 'tensor<1024xf32>'}}
  %0 = sparse_tensor.values %arg0 : tensor<1024xf32> to memref<?xf32>
  return %0 : memref<?xf32>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @indices_buffer_noncoo(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  // expected-error@+1 {{expected sparse tensor with a COO region}}
  %0 = sparse_tensor.coordinates_buffer %arg0 : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

func.func @indices_buffer_dense(%arg0: tensor<1024xf32>) -> memref<?xindex> {
  // expected-error@+1 {{must be sparse tensor of any type values}}
  %0 = sparse_tensor.coordinates_buffer %arg0 : tensor<1024xf32> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
  // expected-error@+1 {{unexpected mismatch in element types}}
  %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf32>
  return %0 : memref<?xf32>
}

// -----

#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>

func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
  // expected-error@+1 {{requested dimension out of bound}}
  %0 = sparse_tensor.slice.offset %arg0 at 2 : tensor<2x8xf64, #CSR_SLICE>
  return %0 : index
}

// -----

#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>

func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
  // expected-error@+1 {{requested dimension out of bound}}
  %0 = sparse_tensor.slice.stride %arg0 at 2 : tensor<2x8xf64, #CSR_SLICE>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
  // expected-error@+1 {{redundant level argument for querying value memory size}}
  %0 = sparse_tensor.storage_specifier.get %arg0 val_mem_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 {
  // expected-error@+1 {{requested slice data on non-slice tensor}}
  %0 = sparse_tensor.storage_specifier.get %arg0 dim_offset at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
  // expected-error@+1 {{missing level argument}}
  %0 = sparse_tensor.storage_specifier.get %arg0 crd_mem_sz
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
  // expected-error@+1 {{requested level is out of bounds}}
  %0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 1
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}

// -----

#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
  // expected-error@+1 {{requested position memory size on a singleton level}}
  %0 = sparse_tensor.storage_specifier.get %arg0 pos_mem_sz at 1
       : !sparse_tensor.storage_specifier<#COO>
  return %0 : index
}

// -----

func.func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
  // expected-error@+1 {{'sparse_tensor.load' op operand #0 must be sparse tensor of any type values, but got 'tensor<16x32xf64>'}}
  %0 = sparse_tensor.load %arg0 : tensor<16x32xf64>
  return %0 : tensor<16x32xf64>
}

// -----

func.func @sparse_push_back(%arg0: index, %arg1: memref<?xf64>, %arg2: f32) -> (memref<?xf64>, index) {
  // expected-error@+1 {{'sparse_tensor.push_back' op failed to verify that value type matches element type of inBuffer}}
  %0:2 = sparse_tensor.push_back %arg0, %arg1, %arg2 : index, memref<?xf64>, f32
  return %0#0, %0#1 : memref<?xf64>, index
}

// -----

func.func @sparse_push_back_n(%arg0: index, %arg1: memref<?xf32>, %arg2: f32) -> (memref<?xf32>, index) {
  %c0 = arith.constant 0: index
  // expected-error@+1 {{'sparse_tensor.push_back' op n must be not less than 1}}
  %0:2 = sparse_tensor.push_back %arg0, %arg1, %arg2, %c0 : index, memref<?xf32>, f32, index
  return %0#0, %0#1 : memref<?xf32>, index
}

// -----

func.func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
  // expected-error@+1 {{'sparse_tensor.expand' op operand #0 must be sparse tensor of any type values, but got 'tensor<128xf64>'}}
  %values, %filled, %added, %count = sparse_tensor.expand %arg0
    : tensor<128xf64> to memref<?xf64>, memref<?xi1>, memref<?xindex>
  return
}

// -----

func.func @sparse_unannotated_compression(%arg0: memref<?xf64>,
                                          %arg1: memref<?xi1>,
                                          %arg2: memref<?xindex>,
                                          %arg3: index,
                                          %arg4: tensor<8x8xf64>,
                                          %arg5: index) {
  // expected-error@+1 {{'sparse_tensor.compress' op operand #4 must be sparse tensor of any type values, but got 'tensor<8x8xf64>'}}
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3 into %arg4[%arg5]
    : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64>
  return
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>

func.func @sparse_wrong_arity_compression(%arg0: memref<?xf64>,
                                          %arg1: memref<?xi1>,
                                          %arg2: memref<?xindex>,
                                          %arg3: index,
                                          %arg4: tensor<8x8xf64, #CSR>,
                                          %arg5: index) {
  // expected-error@+1 {{'sparse_tensor.compress' op incorrect number of coordinates}}
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3 into %arg4[%arg5,%arg5]
    : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #CSR>
  return
}

// -----

func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
  // expected-error@+1 {{invalid kind of type specified}}
  %0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
  return %0 : tensor<10xf32>
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
  // expected-error@+1 {{unexpected conversion mismatch in rank}}
  %0 = sparse_tensor.convert %arg0 : tensor<10x10xf64, #DCSR> to tensor<?xf64>
  return %0 : tensor<?xf64>
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>

func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
  // expected-error@+1 {{unexpected conversion mismatch in dimension 1}}
  %0 = sparse_tensor.convert %arg0 : tensor<10x?xf32> to tensor<10x10xf32, #CSR>
  return %0 : tensor<10x10xf32, #CSR>
}

// -----

func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr) {
  // expected-error@+1 {{'sparse_tensor.out' op operand #0 must be sparse tensor of any type values, but got 'tensor<10xf64>'}}
  sparse_tensor.out %arg0, %arg1 : tensor<10xf64>, !llvm.ptr
  return
}

// -----

#CSR = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>

func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
  // expected-error@+1 {{cannot convert to a sparse tensor slice}}
  %0 = sparse_tensor.convert %arg0 : tensor<10x?xf32> to tensor<10x10xf32, #CSR>
  return %0 : tensor<10x10xf32, #CSR>
}

// -----

func.func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{overlap region must have exactly 2 arguments}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
    left={}
    right={}
  return %r : f64
}

// -----

func.func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{right region must have exactly 1 arguments}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={}
    left={}
    right={
      ^bb0(%x: f64, %y: f64):
        sparse_tensor.yield %y : f64
    }
  return %r : f64
}

// -----

func.func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{overlap region argument 2 type mismatch}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={
      ^bb0(%x: f64, %y: f32):
        sparse_tensor.yield %x : f64
    }
    left=identity
    right=identity
  return %r : f64
}

// -----

func.func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{left region yield type mismatch}}
  %0 = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={}
    left={
      ^bb0(%x: f64):
        %1 = arith.constant 0.0 : f32
        sparse_tensor.yield %1 : f32
    }
    right=identity
  return %0 : f64
}

// -----

func.func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
  // expected-error@+1 {{left=identity requires first argument to have the same type as the output}}
  %0 = sparse_tensor.binary %arg0, %arg1 : i64, f64 to f64
    overlap={}
    left=identity
    right=identity
  return %0 : f64
}

// -----

func.func @invalid_binary_wrong_yield(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{left region must end with sparse_tensor.yield}}
  %0 = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={}
    left={
      ^bb0(%x: f64):
        tensor.yield %x : f64
    }
    right=identity
  return %0 : f64
}

// -----

func.func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{present region argument 1 type mismatch}}
  %r = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: index):
        sparse_tensor.yield %x : index
    }
    absent={}
  return %r : f64
}

// -----

func.func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{absent region must have exactly 0 arguments}}
  %r = sparse_tensor.unary %arg0 : f64 to f64
    present={}
    absent={
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
  return %r : f64
}

// -----

func.func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
  // expected-error@+1 {{present region yield type mismatch}}
  %0 = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: f64):
        %1 = arith.constant 0.0 : f32
        sparse_tensor.yield %1 : f32
    }
    absent={}
  return %0 : f64
}

// -----

func.func @invalid_unary_wrong_yield(%arg0: f64) -> f64 {
  // expected-error@+1 {{present region must end with sparse_tensor.yield}}
  %0 = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: f64):
        tensor.yield %x : f64
    }
    absent={}
  return %0 : f64
}

// -----


#SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>

#trait = {
  indexing_maps = [ affine_map<(i) -> (i)>, affine_map<(i) -> (i)> ],
  iterator_types = ["parallel"]
}

func.func @invalid_absent_value(%arg0 : tensor<100xf64, #SparseVector>) -> tensor<100xf64, #SparseVector> {
  %C = tensor.empty() : tensor<100xf64, #SparseVector>
  %0 = linalg.generic #trait
    ins(%arg0: tensor<100xf64, #SparseVector>)
    outs(%C: tensor<100xf64, #SparseVector>) {
     ^bb0(%a: f64, %c: f64) :
        // expected-error@+1 {{absent region cannot yield linalg argument}}
        %result = sparse_tensor.unary %a : f64 to f64
           present={}
           absent={ sparse_tensor.yield %a : f64 }
        linalg.yield %result : f64
    } -> tensor<100xf64, #SparseVector>
  return %0 : tensor<100xf64, #SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>

#trait = {
  indexing_maps = [ affine_map<(i) -> (i)>, affine_map<(i) -> (i)> ],
  iterator_types = ["parallel"]
}

func.func @invalid_absent_computation(%arg0 : tensor<100xf64, #SparseVector>) -> tensor<100xf64, #SparseVector> {
  %f0 = arith.constant 0.0 : f64
  %C = tensor.empty() : tensor<100xf64, #SparseVector>
  %0 = linalg.generic #trait
    ins(%arg0: tensor<100xf64, #SparseVector>)
    outs(%C: tensor<100xf64, #SparseVector>) {
     ^bb0(%a: f64, %c: f64) :
        %v = arith.addf %a, %f0 : f64
        // expected-error@+1 {{absent region cannot yield locally computed value}}
        %result = sparse_tensor.unary %a : f64 to f64
           present={}
           absent={ sparse_tensor.yield %v : f64 }
        linalg.yield %result : f64
    } -> tensor<100xf64, #SparseVector>
  return %0 : tensor<100xf64, #SparseVector>
}

// -----

func.func @invalid_reduce_num_args_mismatch(%arg0: f64, %arg1: f64) -> f64 {
  %cf1 = arith.constant 1.0 : f64
  // expected-error@+1 {{reduce region must have exactly 2 arguments}}
  %r = sparse_tensor.reduce %arg0, %arg1, %cf1 : f64 {
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
  return %r : f64
}

// -----

func.func @invalid_reduce_block_arg_type_mismatch(%arg0: i64, %arg1: i64) -> i64 {
  %ci1 = arith.constant 1 : i64
  // expected-error@+1 {{reduce region argument 1 type mismatch}}
  %r = sparse_tensor.reduce %arg0, %arg1, %ci1 : i64 {
      ^bb0(%x: f64, %y: f64):
        %cst = arith.constant 2 : i64
        sparse_tensor.yield %cst : i64
    }
  return %r : i64
}

// -----

func.func @invalid_reduce_return_type_mismatch(%arg0: f64, %arg1: f64) -> f64 {
  %cf1 = arith.constant 1.0 : f64
  // expected-error@+1 {{reduce region yield type mismatch}}
  %r = sparse_tensor.reduce %arg0, %arg1, %cf1 : f64 {
      ^bb0(%x: f64, %y: f64):
        %cst = arith.constant 2 : i64
        sparse_tensor.yield %cst : i64
    }
  return %r : f64
}

// -----

func.func @invalid_reduce_wrong_yield(%arg0: f64, %arg1: f64) -> f64 {
  %cf1 = arith.constant 1.0 : f64
  // expected-error@+1 {{reduce region must end with sparse_tensor.yield}}
  %r = sparse_tensor.reduce %arg0, %arg1, %cf1 : f64 {
      ^bb0(%x: f64, %y: f64):
        %cst = arith.constant 2 : i64
        tensor.yield %cst : i64
    }
  return %r : f64
}

// -----

func.func @invalid_select_num_args_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{select region must have exactly 1 arguments}}
  %r = sparse_tensor.select %arg0 : f64 {
      ^bb0(%x: f64, %y: f64):
        %ret = arith.constant 1 : i1
        sparse_tensor.yield %ret : i1
    }
  return %r : f64
}

// -----

func.func @invalid_select_return_type_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{select region yield type mismatch}}
  %r = sparse_tensor.select %arg0 : f64 {
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
  return %r : f64
}

// -----

func.func @invalid_select_wrong_yield(%arg0: f64) -> f64 {
  // expected-error@+1 {{select region must end with sparse_tensor.yield}}
  %r = sparse_tensor.select %arg0 : f64 {
      ^bb0(%x: f64):
        tensor.yield %x : f64
    }
  return %r : f64
}

// -----

#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
func.func @invalid_concat_less_inputs(%arg: tensor<9x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{Need at least two tensors to concatenate.}}
  %0 = sparse_tensor.concatenate %arg {dimension = 1 : index}
       : tensor<9x4xf64, #DC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>,
                              %arg1: tensor<3x4xf64, #DC>,
                              %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{Concat-dimension is out of bounds for dimension-rank (4 >= 2)}}
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 4 : index}
       : tensor<2x4xf64, #DC>,
         tensor<3x4xf64, #DC>,
         tensor<4x4xf64, #DC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#C = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
#DCC = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed)}>
func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>,
                                        %arg1: tensor<3x4xf64, #DC>,
                                        %arg2: tensor<4x4x4xf64, #DCC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{Input tensor $0 has a different rank (rank=1) from the output tensor (rank=2)}}
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2xf64, #C>,
         tensor<3x4xf64, #DC>,
         tensor<4x4x4xf64, #DCC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
func.func @invalid_concat_size_mismatch_dyn(%arg0: tensor<?x4xf64, #DC>,
                                            %arg1: tensor<5x4xf64, #DC>,
                                            %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{Input tensor $0 has dynamic shape}}
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<?x4xf64, #DC>,
         tensor<5x4xf64, #DC>,
         tensor<4x4xf64, #DC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
func.func @invalid_concat_size_mismatch(%arg0: tensor<3x4xf64, #DC>,
                                        %arg1: tensor<5x4xf64, #DC>,
                                        %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{The concatenation dimension of the output tensor should be the sum of}}
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<3x4xf64, #DC>,
         tensor<5x4xf64, #DC>,
         tensor<4x4xf64, #DC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
func.func @invalid_concat_size_mismatch(%arg0: tensor<2x4xf64, #DC>,
                                        %arg1: tensor<3x3xf64, #DC>,
                                        %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> {
  // expected-error@+1 {{All dimensions (expect for the concatenating one) should be equal}}
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2x4xf64, #DC>,
         tensor<3x3xf64, #DC>,
         tensor<4x4xf64, #DC> to tensor<9x4xf64, #DC>
  return %0 : tensor<9x4xf64, #DC>
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () {
  // expected-error@+1 {{Unmatched number of arguments in the block}}
  sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do {
    ^bb0(%1: index, %2: index, %3: index, %v: f64) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () {
  // expected-error@+1 {{Expecting Index type for argument at index 1}}
  sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do {
    ^bb0(%1: index, %2: f64, %v: f64) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () {
  // expected-error@+1 {{Unmatched element type between input tensor and block argument}}
  sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do {
    ^bb0(%1: index, %2: index, %v: f32) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () {
  // expected-error@+1 {{Unmatched element type between input tensor and block argument}}
  sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do {
    ^bb0(%1: index, %2: index, %v: f32) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () {
  // expected-error@+1 {{Mismatch in number of init arguments and results}}
  sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 do {
    ^bb0(%1: index, %2: index, %v: f32, %r1 : i32) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () {
  // expected-error@+1 {{Mismatch in types of init arguments and results}}
  %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> i32 do {
    ^bb0(%1: index, %2: index, %v: f32, %r0 : f32) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () {
  // expected-error@+1 {{Mismatch in types of yield values and results}}
  %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> f32 do {
    ^bb0(%1: index, %2: index, %v: f32, %r0 : f32) :
      sparse_tensor.yield %1 : index
  }
  return
}


// -----

#MAP = affine_map<(i,j) -> (i,j)>

func.func @sparse_sort_coo_x_type( %arg0: index, %arg1: memref<?xf32>) {
  // expected-error@+1 {{operand #1 must be 1D memref of integer or index values}}
  sparse_tensor.sort insertion_sort_stable %arg0, %arg1 {perm_map = #MAP} : memref<?xf32>
  return
}

// -----

#MAP = affine_map<(i,j) -> (i,j)>

func.func @sparse_sort_coo_x_too_small(%arg0: memref<50xindex>) {
  %i20 = arith.constant 20 : index
  // expected-error@+1 {{Expected dimension(xy) >= n * (rank(perm_map) + ny) got 50 < 60}}
  sparse_tensor.sort hybrid_quick_sort %i20, %arg0 {perm_map = #MAP, ny = 1 : index} : memref<50xindex>
  return
}

// -----

#MAP = affine_map<(i,j) -> (i,j)>

func.func @sparse_sort_coo_y_too_small(%arg0: memref<60xindex>, %arg1: memref<10xf32>) {
  %i20 = arith.constant 20 : index
  // expected-error@+1 {{Expected dimension(y) >= n got 10 < 20}}
  sparse_tensor.sort insertion_sort_stable %i20, %arg0 jointly %arg1 {perm_map = #MAP, ny = 1 : index} : memref<60xindex> jointly memref<10xf32>
  return
}

// -----

#NON_PERM_MAP = affine_map<(i,j) -> (i,i)>

func.func @sparse_sort_coo_no_perm(%arg0: index, %arg1: memref<?xindex>) -> (memref<?xindex>) {
  // expected-error@+1 {{Expected a permutation map, got (d0, d1) -> (d0, d0)}}
  sparse_tensor.sort hybrid_quick_sort %arg0, %arg1 {perm_map = #NON_PERM_MAP, ny = 1 : index}: memref<?xindex>
  return %arg1 : memref<?xindex>
}

// -----

#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
#OrderedCOOPerm = #sparse_tensor.encoding<{map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton)}>

func.func @sparse_permuted_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xf32, #OrderedCOOPerm> {
  // expected-error@+1 {{Unmatched dim2lvl map between input and result COO}}
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xf32, #OrderedCOOPerm>
  return %ret : tensor<?x?xf32, #OrderedCOOPerm>
}

// -----

#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
#OrderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

func.func @sparse_permuted_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xf64, #OrderedCOO> {
  // expected-error@+1 {{Unmatched storage format between input and result COO}}
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xf64, #OrderedCOO>
  return %ret : tensor<?x?xf64, #OrderedCOO>
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

func.func @sparse_crd_translate(%arg0: index, %arg1: index) -> (index, index, index) {
  // expected-error@+1 {{Coordinate rank mismatch with encoding}}
  %l0, %l1, %l2 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1] as #BSR : index, index, index
  return  %l0, %l1, %l2 : index, index, index
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

func.func @sparse_crd_translate(%arg0: index, %arg1: index, %arg2: index) -> (index, index, index, index) {
  // expected-error@+1 {{Coordinate rank mismatch with encoding}}
  %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1, %arg2] as #BSR : index, index, index, index
  return  %l0, %l1, %l2, %l3 : index, index, index, index
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

func.func @sparse_lvl(%t : tensor<?x?xi32, #BSR>) -> index {
  %lvl = arith.constant 5 : index
  // expected-error@+1 {{Level index exceeds the rank of the input sparse tensor}}
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<?x?xi32, #BSR>
  return  %l0 : index
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) -> ( i floordiv 2 : dense,
                      j floordiv 3 : compressed,
                      i mod 2      : dense,
                      j mod 3      : dense
  )
}>

#DSDC = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: compressed)
}>

func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x3xf32, #DSDC> {
  // expected-error@+1 {{Level type mismatch between source/dest tensors}}
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xf32, #DSDC>
  return %t1 : tensor<3x4x2x3xf32, #DSDC>
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) -> ( i floordiv 2 : dense,
                      j floordiv 3 : compressed,
                      i mod 2      : dense,
                      j mod 3      : dense
  )
}>

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x3xf32, #DSDD> {
  // expected-error@+1 {{Element type mismatch between source/dest tensors}}
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xf32, #DSDD>
  return %t1 : tensor<3x4x2x3xf32, #DSDD>
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) -> ( i floordiv 2 : dense,
                      j floordiv 3 : compressed,
                      i mod 2      : dense,
                      j mod 3      : dense
  )
}>

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x4xi32, #DSDD> {
  // expected-error@+1 {{Level size mismatch between source/dest tensors}}
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x4xi32, #DSDD>
  return %t1 : tensor<3x4x2x4xi32, #DSDD>
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

func.func @sparse_print(%arg0: tensor<10x10xf64>) {
  // expected-error@+1 {{'sparse_tensor.print' op operand #0 must be sparse tensor of any type values}}
  sparse_tensor.print %arg0 : tensor<10x10xf64>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 2>) {
  // expected-error@+1 {{'sparse_tensor.extract_iteration_space' expect larger level upper bound than lower bound}}
  %l1 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 2 to 0 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 2>
                                                                       -> !sparse_tensor.iter_space<#COO, lvls = 0 to 2>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 0>) {
  // expected-error@+1 {{'sparse_tensor.extract_iteration_space' op parent iterator should be specified iff level lower bound equals 0}}
  %l1 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 0 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0>
                                                                  -> !sparse_tensor.iter_space<#COO, lvls = 1>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>) {
  // expected-error@+1 {{'sparse_tensor.extract_iteration_space' op parent iterator should be specified iff level lower bound equals 0}}
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 1 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 1>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

#CSR = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : dense,
    j : compressed
  )
}>

func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#CSR, lvls = 0>) {
  // expected-error@+1 {{'sparse_tensor.extract_iteration_space' op mismatch in parent iterator encoding and iteration space encoding.}}
  %l1 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 1 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#CSR, lvls = 0>
                                                                 -> !sparse_tensor.iter_space<#COO, lvls = 1>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 0>) {
  // expected-error@+1 {{'sparse_tensor.extract_iteration_space' op parent iterator should be used to extract an iteration space from a consecutive level.}}
  %l1 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 2 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0>
                                                                  -> !sparse_tensor.iter_space<#COO, lvls = 2>
  return
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

#CSR = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : dense,
    j : compressed
  )
}>

func.func @sparse_extract_value(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#CSR, lvls = 1>) -> f32 {
  // expected-error@+1 {{'sparse_tensor.extract_value' op mismatch in tensor encoding and iterator encoding.}}
  %f = sparse_tensor.extract_value %sp at %it1 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#CSR, lvls = 1>
  return %f : f32
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_extract_value(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 0>) -> f32 {
  // expected-error@+1 {{'sparse_tensor.extract_value' op must use last-level iterator to extract values.}}
  %f = sparse_tensor.extract_value %sp at %it1 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0>
  return %f : f32
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_iterate(%sp : tensor<4x8xf32, #COO>, %i : index, %j : index) -> index {
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
  // expected-error @+1 {{'sparse_tensor.iterate' op different number of region iter_args and yielded values: 2 != 1}}
  %r1, %r2 = sparse_tensor.iterate %it1 in %l1 at (%crd) iter_args(%si = %i, %sj = %j): !sparse_tensor.iter_space<#COO, lvls = 0> -> (index, index) {
    sparse_tensor.yield %si : index
  }
  return %r1 : index
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

// expected-note@+1 {{prior use here}}
func.func @sparse_iterate(%sp : tensor<4x8xf32, #COO>, %i : index) -> f32 {
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
  // expected-error @+1 {{use of value '%i' expects different type than prior uses: 'f32' vs 'index'}}
  %r1 = sparse_tensor.iterate %it1 in %l1 at (%crd) iter_args(%outer = %i): !sparse_tensor.iter_space<#COO, lvls = 0> -> f32 {
    sparse_tensor.yield %outer : f32
  }
  return %r1 : f32
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

func.func @sparse_iterate(%sp : tensor<4x8xf32, #COO>, %i : index, %j : index) -> index {
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
  // expected-error @+1 {{'sparse_tensor.iterate' op 0-th region iter_arg and 0-th yielded value have different type: 'index' != 'f32'}}
  %r1 = sparse_tensor.iterate %it1 in %l1 at (%crd) iter_args(%si = %i): !sparse_tensor.iter_space<#COO, lvls = 0> -> index {
    %y = arith.constant 1.0 :  f32
    sparse_tensor.yield %y : f32
  }
  return %r1 : index
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>


func.func @sparse_coiteration(%sp1 : !sparse_tensor.iter_space<#COO, lvls = 0>,
                              %sp2 : !sparse_tensor.iter_space<#COO, lvls = 1>) -> index {
  %init = arith.constant 0 : index
  // expected-error @+1 {{'sparse_tensor.coiterate' op contains duplicated cases.}}
  %ret = sparse_tensor.coiterate (%sp1, %sp2) at (%coord) iter_args(%arg = %init)
       : (!sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>)
       -> index
  case %it1, _ {
    sparse_tensor.yield %arg : index
  }
  case %it1, _ {
    sparse_tensor.yield %arg : index
  }
  return %ret : index
}


// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>


func.func @sparse_coiteration(%sp1 : !sparse_tensor.iter_space<#COO, lvls = 0>,
                              %sp2 : !sparse_tensor.iter_space<#COO, lvls = 1>) -> index {
  %init = arith.constant 0 : index
  // expected-error @+1 {{'sparse_tensor.coiterate' op types mismatch between 0th yield value and defined value on 0th region}}
  %ret = sparse_tensor.coiterate (%sp1, %sp2) at (%coord) iter_args(%arg = %init)
       : (!sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>)
       -> index
  case %it1, _ {
    %i = arith.constant 1 : i32
    sparse_tensor.yield %i : i32
  }
  return %ret : index
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>


func.func @sparse_coiteration(%sp1 : !sparse_tensor.iter_space<#COO, lvls = 0>,
                              %sp2 : !sparse_tensor.iter_space<#COO, lvls = 1>) -> index {
  %init = arith.constant 0 : index
  // expected-error @+1 {{'sparse_tensor.coiterate' op required out-of-bound coordinates}}
  %ret = sparse_tensor.coiterate (%sp1, %sp2) at (%coord1, %coord2) iter_args(%arg = %init)
       : (!sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>)
       -> index
  case %it1, _ {
    %i = arith.constant 1 : i32
    sparse_tensor.yield %i : i32
  }
  return %ret : index
}