// RUN: mlir-opt %s -split-input-file | mlir-opt -split-input-file | FileCheck %s
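// Round-trip test for SparseTensor dialect ops: each case below (split at the
// `// -----` markers) is parsed and re-printed by mlir-opt, run through
// mlir-opt a second time, and the printed form is verified by FileCheck.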

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_new(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr)
//       CHECK: %[[T:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor<128xf64, #{{.*}}>
//       CHECK: return %[[T]] : tensor<128xf64, #{{.*}}>
func.func @sparse_new(%arg0: !llvm.ptr) -> tensor<128xf64, #SparseVector> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor<128xf64, #SparseVector>
  return %0 : tensor<128xf64, #SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), posWidth=32, crdWidth=32}>

// CHECK-LABEL: func @sparse_pack(
// CHECK-SAME: %[[P:.*]]: tensor<2xi32>,
// CHECK-SAME: %[[I:.*]]: tensor<6x1xi32>,
// CHECK-SAME: %[[D:.*]]: tensor<6xf64>)
//       CHECK: %[[R:.*]] = sparse_tensor.assemble (%[[P]], %[[I]]), %[[D]]
//       CHECK: return %[[R]] : tensor<100xf64, #{{.*}}>
func.func @sparse_pack(%pos: tensor<2xi32>, %index: tensor<6x1xi32>, %data: tensor<6xf64>)
                            -> tensor<100xf64, #SparseVector> {
  %0 = sparse_tensor.assemble (%pos, %index), %data: (tensor<2xi32>, tensor<6x1xi32>), tensor<6xf64>
                                             to tensor<100xf64, #SparseVector>
  return %0 : tensor<100xf64, #SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed), crdWidth=32}>
// CHECK-LABEL: func @sparse_unpack(
//  CHECK-SAME: %[[T:.*]]: tensor<100xf64, #
//  CHECK-SAME: %[[OP:.*]]: tensor<2xindex>,
//  CHECK-SAME: %[[OI:.*]]: tensor<6x1xi32>,
//  CHECK-SAME: %[[OD:.*]]: tensor<6xf64>)
//       CHECK: %[[P:.*]]:2, %[[D:.*]], %[[PL:.*]]:2, %[[DL:.*]] = sparse_tensor.disassemble %[[T]]
//       CHECK: return %[[P]]#0, %[[P]]#1, %[[D]]
func.func @sparse_unpack(%sp : tensor<100xf64, #SparseVector>,
                         %op : tensor<2xindex>,
                         %oi : tensor<6x1xi32>,
                         %od : tensor<6xf64>)
                       -> (tensor<2xindex>, tensor<6x1xi32>, tensor<6xf64>) {
  %rp, %ri, %d, %rpl, %ril, %dl = sparse_tensor.disassemble %sp : tensor<100xf64, #SparseVector>
                  out_lvls(%op, %oi : tensor<2xindex>, tensor<6x1xi32>)
                  out_vals(%od : tensor<6xf64>)
                  -> (tensor<2xindex>, tensor<6x1xi32>), tensor<6xf64>, (index, index), index
  return %rp, %ri, %d : tensor<2xindex>, tensor<6x1xi32>, tensor<6xf64>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_dealloc(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>
//       CHECK: bufferization.dealloc_tensor %[[A]] : tensor<128xf64, #{{.*}}>
//       CHECK: return
func.func @sparse_dealloc(%arg0: tensor<128xf64, #SparseVector>) {
  bufferization.dealloc_tensor %arg0 : tensor<128xf64, #SparseVector>
  return
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_convert_1d_to_sparse(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//       CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<64xf32> to tensor<64xf32, #{{.*}}>
//       CHECK: return %[[T]] : tensor<64xf32, #{{.*}}>
func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// -----

#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }>

// CHECK-LABEL: func @sparse_convert_3d_from_sparse(
// CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<8x8x8xf64, #{{.*}}> to tensor<8x8x8xf64>
//       CHECK: return %[[T]] : tensor<8x8x8xf64>
func.func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) -> tensor<8x8x8xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<8x8x8xf64, #SparseTensor> to tensor<8x8x8xf64>
  return %0 : tensor<8x8x8xf64>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_positions(
//  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.positions %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
//       CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %0 = sparse_tensor.positions %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func @sparse_indices_buffer(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.coordinates_buffer %[[A]] : tensor<?x?xf64, #{{.*}}> to memref<?xindex>
//       CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices_buffer(%arg0: tensor<?x?xf64, #COO>) -> memref<?xindex> {
  %0 = sparse_tensor.coordinates_buffer %arg0 : tensor<?x?xf64, #COO> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_indices(
//  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.coordinates %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
//       CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %0 = sparse_tensor.coordinates %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_values(
//  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64, #{{.*}}> to memref<?xf64>
//       CHECK: return %[[T]] : memref<?xf64>
func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
  return %0 : memref<?xf64>
}

// -----

#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_slice_offset(
//  CHECK-SAME: %[[A:.*]]: tensor<2x8xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.slice.offset %[[A]] at 1 : tensor<2x8xf64, #{{.*}}>
//       CHECK: return %[[T]] : index
func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
  %0 = sparse_tensor.slice.offset %arg0 at 1 : tensor<2x8xf64, #CSR_SLICE>
  return %0 : index
}

// -----

#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_slice_stride(
//  CHECK-SAME: %[[A:.*]]: tensor<2x8xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.slice.stride %[[A]] at 1 : tensor<2x8xf64, #{{.*}}>
//       CHECK: return %[[T]] : index
func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
  %0 = sparse_tensor.slice.stride %arg0 at 1 : tensor<2x8xf64, #CSR_SLICE>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_metadata_init(
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#{{.*}}>
//       CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}>
func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVector> {
  %0 = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : !sparse_tensor.storage_specifier<#SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#SparseVector_Slice = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
}>

// CHECK-LABEL: func @sparse_metadata_init(
//  CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.init with %[[A]] :
//       CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}>
func.func @sparse_metadata_init(%src : !sparse_tensor.storage_specifier<#SparseVector>)
                                    -> !sparse_tensor.storage_specifier<#SparseVector_Slice> {
  %0 = sparse_tensor.storage_specifier.init with %src : from !sparse_tensor.storage_specifier<#SparseVector>
                                                          to !sparse_tensor.storage_specifier<#SparseVector_Slice>
  return %0 : !sparse_tensor.storage_specifier<#SparseVector_Slice>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_get_md(
//  CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] lvl_sz at 0
//       CHECK: return %[[T]] : index
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
  %0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}

// -----

#SparseVector_Slice = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
}>

// CHECK-LABEL: func @sparse_get_md(
//  CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] dim_offset at 0
//       CHECK: return %[[T]] : index
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector_Slice>) -> index {
  %0 = sparse_tensor.storage_specifier.get %arg0 dim_offset at 0
       : !sparse_tensor.storage_specifier<#SparseVector_Slice>
  return %0 : index
}

// -----

#SparseVector = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
}>

// CHECK-LABEL: func @sparse_get_md(
//  CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] dim_stride at 0
//       CHECK: return %[[T]] : index
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
  %0 = sparse_tensor.storage_specifier.get %arg0 dim_stride at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : index
}


// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_set_md(
//  CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>,
//  CHECK-SAME: %[[I:.*]]: index)
//       CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] lvl_sz at 0 with %[[I]]
//       CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}>
func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index)
          -> !sparse_tensor.storage_specifier<#SparseVector> {
  %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %0 : !sparse_tensor.storage_specifier<#SparseVector>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_noe(
//  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.number_of_entries %[[A]] : tensor<128xf64, #{{.*}}>
//       CHECK: return %[[T]] : index
func.func @sparse_noe(%arg0: tensor<128xf64, #SparseVector>) -> index {
  %0 = sparse_tensor.number_of_entries %arg0 : tensor<128xf64, #SparseVector>
  return %0 : index
}

// -----

#DenseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : dense)}>

// CHECK-LABEL: func @sparse_load(
//  CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] : tensor<16x32xf64, #{{.*}}>
//       CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
func.func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
  %0 = sparse_tensor.load %arg0 : tensor<16x32xf64, #DenseMatrix>
  return %0 : tensor<16x32xf64, #DenseMatrix>
}

// -----

#DenseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : dense)}>

// CHECK-LABEL: func @sparse_load_ins(
//  CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
//       CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] hasInserts : tensor<16x32xf64, #{{.*}}>
//       CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
func.func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
  %0 = sparse_tensor.load %arg0 hasInserts : tensor<16x32xf64, #DenseMatrix>
  return %0 : tensor<16x32xf64, #DenseMatrix>
}

// -----

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_insert(
//  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #sparse{{[0-9]*}}>,
//  CHECK-SAME: %[[B:.*]]: index,
//  CHECK-SAME: %[[C:.*]]: f64)
//       CHECK: %[[T:.*]] = tensor.insert %[[C]] into %[[A]][%[[B]]] : tensor<128xf64, #{{.*}}>
//       CHECK: return %[[T]] : tensor<128xf64, #{{.*}}>
func.func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: index, %arg2: f64) -> tensor<128xf64, #SparseVector> {
  %0 = tensor.insert %arg2 into %arg0[%arg1] : tensor<128xf64, #SparseVector>
  return %0 : tensor<128xf64, #SparseVector>
}

// -----

// CHECK-LABEL: func @sparse_push_back(
//  CHECK-SAME: %[[A:.*]]: index,
//  CHECK-SAME: %[[B:.*]]: memref<?xf64>,
//  CHECK-SAME: %[[C:.*]]: f64) -> (memref<?xf64>, index) {
//       CHECK: %[[D:.*]] = sparse_tensor.push_back %[[A]], %[[B]], %[[C]] : index, memref<?xf64>, f64
//       CHECK: return %[[D]]
func.func @sparse_push_back(%arg0: index, %arg1: memref<?xf64>, %arg2: f64) -> (memref<?xf64>, index) {
  %0:2 = sparse_tensor.push_back %arg0, %arg1, %arg2 : index, memref<?xf64>, f64
  return %0#0, %0#1 : memref<?xf64>, index
}

// -----

// CHECK-LABEL: func @sparse_push_back_inbound(
//  CHECK-SAME: %[[A:.*]]: index,
//  CHECK-SAME: %[[B:.*]]: memref<?xf64>,
//  CHECK-SAME: %[[C:.*]]: f64) -> (memref<?xf64>, index) {
//       CHECK: %[[D:.*]] = sparse_tensor.push_back inbounds %[[A]], %[[B]], %[[C]] : index, memref<?xf64>, f64
//       CHECK: return %[[D]]
func.func @sparse_push_back_inbound(%arg0: index, %arg1: memref<?xf64>, %arg2: f64) -> (memref<?xf64>, index) {
  %0:2 = sparse_tensor.push_back inbounds %arg0, %arg1, %arg2 : index, memref<?xf64>, f64
  return %0#0, %0#1 : memref<?xf64>, index
}

// -----

// CHECK-LABEL: func @sparse_push_back_n(
//  CHECK-SAME: %[[A:.*]]: index,
//  CHECK-SAME: %[[B:.*]]: memref<?xf64>,
//  CHECK-SAME: %[[C:.*]]: f64,
//  CHECK-SAME: %[[D:.*]]: index) -> (memref<?xf64>, index) {
//       CHECK: %[[E:.*]] = sparse_tensor.push_back %[[A]], %[[B]], %[[C]], %[[D]] : index, memref<?xf64>, f64, index
//       CHECK: return %[[E]]
func.func @sparse_push_back_n(%arg0: index, %arg1: memref<?xf64>, %arg2: f64, %arg3: index) -> (memref<?xf64>, index) {
  %0:2 = sparse_tensor.push_back %arg0, %arg1, %arg2, %arg3 : index, memref<?xf64>, f64, index
  return %0#0, %0#1 : memref<?xf64>, index
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_expansion(
//  CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse{{[0-9]*}}>)
//       CHECK: %{{.*}}, %{{.*}}, %{{.*}}, %[[T:.*]] = sparse_tensor.expand %[[A]]
//       CHECK: return %[[T]] : index
func.func @sparse_expansion(%tensor: tensor<8x8xf64, #SparseMatrix>) -> index {
  %values, %filled, %added, %count = sparse_tensor.expand %tensor
    : tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>
  return %count : index
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_compression(
//  CHECK-SAME: %[[A0:.*0]]: memref<?xf64>,
//  CHECK-SAME: %[[A1:.*1]]: memref<?xi1>,
//  CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
//  CHECK-SAME: %[[A3:.*3]]: index
//  CHECK-SAME: %[[A4:.*4]]: tensor<8x8xf64, #sparse{{[0-9]*}}>,
//  CHECK-SAME: %[[A5:.*5]]: index)
//       CHECK: %[[T:.*]] = sparse_tensor.compress %[[A0]], %[[A1]], %[[A2]], %[[A3]] into %[[A4]][%[[A5]]
//       CHECK: return %[[T]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
func.func @sparse_compression(%values: memref<?xf64>,
                              %filled: memref<?xi1>,
                              %added: memref<?xindex>,
                              %count: index,
                              %tensor: tensor<8x8xf64, #SparseMatrix>,
                              %index: index) -> tensor<8x8xf64, #SparseMatrix> {
  %0 = sparse_tensor.compress %values, %filled, %added, %count into %tensor[%index]
    : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #SparseMatrix>
  return %0 : tensor<8x8xf64, #SparseMatrix>
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_out(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #sparse{{[0-9]*}}>,
//  CHECK-SAME: %[[B:.*]]: !llvm.ptr)
//       CHECK: sparse_tensor.out %[[A]], %[[B]] : tensor<?x?xf64, #sparse{{[0-9]*}}>, !llvm.ptr
//       CHECK: return
func.func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr) {
  sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr
  return
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_binary(
//  CHECK-SAME:   %[[A:.*]]: f64, %[[B:.*]]: i64) -> f64 {
//       CHECK:   %[[Z:.*]] = arith.constant 0.000000e+00 : f64
//       CHECK:   %[[C1:.*]] = sparse_tensor.binary %[[A]], %[[B]] : f64, i64 to f64
//       CHECK:     overlap = {
//       CHECK:       ^bb0(%[[A1:.*]]: f64, %[[B1:.*]]: i64):
//       CHECK:         sparse_tensor.yield %[[A1]] : f64
//       CHECK:     }
//       CHECK:     left = identity
//       CHECK:     right = {
//       CHECK:       ^bb0(%[[A2:.*]]: i64):
//       CHECK:         sparse_tensor.yield %[[Z]] : f64
//       CHECK:     }
//       CHECK:   return %[[C1]] : f64
//       CHECK: }
func.func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
  %cf0 = arith.constant 0.0 : f64
  %r = sparse_tensor.binary %arg0, %arg1 : f64, i64 to f64
    overlap={
      ^bb0(%x: f64, %y: i64):
        sparse_tensor.yield %x : f64
    }
    left=identity
    right={
      ^bb0(%y: i64):
        sparse_tensor.yield %cf0 : f64
    }
  return %r : f64
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_unary(
//  CHECK-SAME:   %[[A:.*]]: f64) -> f64 {
//       CHECK:   %[[C1:.*]] = sparse_tensor.unary %[[A]] : f64 to f64
//       CHECK:     present = {
//       CHECK:       ^bb0(%[[A1:.*]]: f64):
//       CHECK:         sparse_tensor.yield %[[A1]] : f64
//       CHECK:     }
//       CHECK:     absent = {
//       CHECK:       %[[R:.*]] = arith.constant -1.000000e+00 : f64
//       CHECK:       sparse_tensor.yield %[[R]] : f64
//       CHECK:     }
//       CHECK:   return %[[C1]] : f64
//       CHECK: }
func.func @sparse_unary(%arg0: f64) -> f64 {
  %r = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    } absent={
      ^bb0:
        %cf1 = arith.constant -1.0 : f64
        sparse_tensor.yield %cf1 : f64
    }
  return %r : f64
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_unary(
//  CHECK-SAME:   %[[A:.*]]: f64) -> i64 {
//       CHECK:   %[[C1:.*]] = sparse_tensor.unary %[[A]] : f64 to i64
//       CHECK:     present = {
//       CHECK:       ^bb0(%[[A1:.*]]: f64):
//       CHECK:         %[[R:.*]] = arith.fptosi %[[A1]] : f64 to i64
//       CHECK:         sparse_tensor.yield %[[R]] : i64
//       CHECK:     }
//       CHECK:     absent = {
//       CHECK:     }
//       CHECK:   return %[[C1]] : i64
//       CHECK: }
func.func @sparse_unary(%arg0: f64) -> i64 {
  %r = sparse_tensor.unary %arg0 : f64 to i64
    present={
      ^bb0(%x: f64):
        %ret = arith.fptosi %x : f64 to i64
        sparse_tensor.yield %ret : i64
    }
    absent={}
  return %r : i64
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_reduce_2d_to_1d(
//  CHECK-SAME:   %[[A:.*]]: f64, %[[B:.*]]: f64) -> f64 {
//       CHECK:   %[[Z:.*]] = arith.constant 0.000000e+00 : f64
//       CHECK:   %[[C1:.*]] = sparse_tensor.reduce %[[A]], %[[B]], %[[Z]] : f64 {
//       CHECK:       ^bb0(%[[A1:.*]]: f64, %[[B1:.*]]: f64):
//       CHECK:         sparse_tensor.yield %[[A1]] : f64
//       CHECK:     }
//       CHECK:   return %[[C1]] : f64
//       CHECK: }
func.func @sparse_reduce_2d_to_1d(%arg0: f64, %arg1: f64) -> f64 {
  %cf0 = arith.constant 0.0 : f64
  %r = sparse_tensor.reduce %arg0, %arg1, %cf0 : f64 {
      ^bb0(%x: f64, %y: f64):
        sparse_tensor.yield %x : f64
    }
  return %r : f64
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_select(
//  CHECK-SAME:   %[[A:.*]]: f64) -> f64 {
//       CHECK:   %[[Z:.*]] = arith.constant 0.000000e+00 : f64
//       CHECK:   %[[C1:.*]] = sparse_tensor.select %[[A]] : f64 {
//       CHECK:       ^bb0(%[[A1:.*]]: f64):
//       CHECK:         %[[B1:.*]] = arith.cmpf ogt, %[[A1]], %[[Z]] : f64
//       CHECK:         sparse_tensor.yield %[[B1]] : i1
//       CHECK:     }
//       CHECK:   return %[[C1]] : f64
//       CHECK: }
func.func @sparse_select(%arg0: f64) -> f64 {
  %cf0 = arith.constant 0.0 : f64
  %r = sparse_tensor.select %arg0 : f64 {
      ^bb0(%x: f64):
        %cmp = arith.cmpf "ogt", %x, %cf0 : f64
        sparse_tensor.yield %cmp : i1
    }
  return %r : f64
}

// -----

#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @concat_sparse_sparse(
//  CHECK-SAME:   %[[A0:.*]]: tensor<2x4xf64
//  CHECK-SAME:   %[[A1:.*]]: tensor<3x4xf64
//  CHECK-SAME:   %[[A2:.*]]: tensor<4x4xf64
//       CHECK:   %[[TMP0:.*]] = sparse_tensor.concatenate %[[A0]], %[[A1]], %[[A2]] {dimension = 0 : index} :
//  CHECK-SAME:   tensor<2x4xf64
//  CHECK-SAME:   tensor<3x4xf64
//  CHECK-SAME:   tensor<4x4xf64
//  CHECK-SAME:   tensor<9x4xf64
//       CHECK:   return %[[TMP0]] : tensor<9x4xf64
func.func @concat_sparse_sparse(%arg0: tensor<2x4xf64, #SparseMatrix>,
                                %arg1: tensor<3x4xf64, #SparseMatrix>,
                                %arg2: tensor<4x4xf64, #SparseMatrix>) -> tensor<9x4xf64, #SparseMatrix> {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2x4xf64, #SparseMatrix>,
         tensor<3x4xf64, #SparseMatrix>,
         tensor<4x4xf64, #SparseMatrix> to tensor<9x4xf64, #SparseMatrix>
  return %0 : tensor<9x4xf64, #SparseMatrix>
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_tensor_foreach(
//  CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64
//       CHECK: sparse_tensor.foreach in %[[A0]] :
//       CHECK:  ^bb0(%arg1: index, %arg2: index, %arg3: f64):
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () {
  sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do {
    ^bb0(%1: index, %2: index, %v: f64) :
  }
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL: func @sparse_tensor_foreach(
//  CHECK-SAME:   %[[A0:.*]]: tensor<2x4xf64, #sparse{{[0-9]*}}>,
//  CHECK-SAME:   %[[A1:.*]]: f32
//  CHECK-NEXT:   %[[RET:.*]] = sparse_tensor.foreach in %[[A0]] init(%[[A1]])
//  CHECK-NEXT:    ^bb0(%[[TMP_1:.*]]: index, %[[TMP_2:.*]]: index, %[[TMP_v:.*]]: f64, %[[TMP_r:.*]]: f32)
//       CHECK:      sparse_tensor.yield %[[TMP_r]] : f32
//       CHECK:  }
func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () {
  %ret = sparse_tensor.foreach in %arg0 init(%arg1): tensor<2x4xf64, #DCSR>, f32 -> f32
  do {
    ^bb0(%1: index, %2: index, %v: f64, %r: f32) :
      sparse_tensor.yield %r : f32
  }
  return
}

// -----

#ID_MAP = affine_map<(i,j) -> (i,j)>

// CHECK-LABEL: func @sparse_sort_coo(
//  CHECK-SAME: %[[A:.*]]: index,
//  CHECK-SAME: %[[B:.*]]: memref<?xindex>)
//       CHECK: sparse_tensor.sort hybrid_quick_sort %[[A]], %[[B]] {ny = 1 : index, perm_map = #{{.*}}} : memref<?xindex>
//       CHECK: return %[[B]]
func.func @sparse_sort_coo(%arg0: index, %arg1: memref<?xindex>) -> (memref<?xindex>) {
  sparse_tensor.sort hybrid_quick_sort %arg0, %arg1 {perm_map = #ID_MAP, ny = 1 : index}: memref<?xindex>
  return %arg1 : memref<?xindex>
}

// -----

#ID_MAP = affine_map<(i,j) -> (i,j)>

// CHECK-LABEL: func @sparse_sort_coo_stable(
//  CHECK-SAME: %[[A:.*]]: index,
//  CHECK-SAME: %[[B:.*]]: memref<?xi64>,
//  CHECK-SAME: %[[C:.*]]: memref<?xf32>)
//       CHECK: sparse_tensor.sort insertion_sort_stable %[[A]], %[[B]] jointly %[[C]] {ny = 1 : index, perm_map = #{{.*}}}
//       CHECK: return %[[B]], %[[C]]
func.func @sparse_sort_coo_stable(%arg0: index, %arg1: memref<?xi64>, %arg2: memref<?xf32>) -> (memref<?xi64>, memref<?xf32>) {
  sparse_tensor.sort insertion_sort_stable %arg0, %arg1 jointly %arg2 {perm_map = #ID_MAP, ny = 1 : index}: memref<?xi64> jointly memref<?xf32>
  return %arg1, %arg2 : memref<?xi64>, memref<?xf32>
}

// -----

#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
#OrderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func @sparse_reorder_coo(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>
//       CHECK: %[[R:.*]] = sparse_tensor.reorder_coo quick_sort %[[A]]
//       CHECK: return %[[R]]
func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xf32, #OrderedCOO> {
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xf32, #OrderedCOO>
  return %ret : tensor<?x?xf32, #OrderedCOO>
}


// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

// CHECK-LABEL:   func.func @sparse_crd_translate(
// CHECK-SAME:      %[[VAL_0:.*]]: index,
// CHECK-SAME:      %[[VAL_1:.*]]: index)
// CHECK:           %[[VAL_2:.*]]:4 = sparse_tensor.crd_translate  dim_to_lvl{{\[}}%[[VAL_0]], %[[VAL_1]]]
// CHECK:           return %[[VAL_2]]#0, %[[VAL_2]]#1, %[[VAL_2]]#2, %[[VAL_2]]#3
func.func @sparse_crd_translate(%arg0: index, %arg1: index) -> (index, index, index, index) {
  %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1] as #BSR : index, index, index, index
  return  %l0, %l1, %l2, %l3 : index, index, index, index
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

// CHECK-LABEL:   func.func @sparse_lvl(
// CHECK-SAME:      %[[VAL_0:.*]]: index,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor
// CHECK:           %[[VAL_2:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_0]]
// CHECK:           return %[[VAL_2]]
func.func @sparse_lvl(%arg0: index, %t : tensor<?x?xi32, #BSR>) -> index {
  %l0 = sparse_tensor.lvl %t, %arg0 : tensor<?x?xi32, #BSR>
  return  %l0 : index
}

// -----

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) -> ( i floordiv 2 : dense,
                      j floordiv 3 : compressed,
                      i mod 2      : dense,
                      j mod 3      : dense
  )
}>

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

// CHECK-LABEL:   func.func @sparse_reinterpret_map(
// CHECK-SAME:      %[[A0:.*]]: tensor<6x12xi32, #sparse{{[0-9]*}}>)
// CHECK:           %[[VAL:.*]] = sparse_tensor.reinterpret_map %[[A0]]
// CHECK:           return %[[VAL]]
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x3xi32, #DSDD> {
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xi32, #DSDD>
  return %t1 : tensor<3x4x2x3xi32, #DSDD>
}

// -----

#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

// CHECK-LABEL:   func.func @sparse_print(
// CHECK-SAME:      %[[A0:.*]]: tensor<10x10xf64, #sparse{{[0-9]*}}>)
// CHECK:           sparse_tensor.print %[[A0]]
// CHECK:           return
func.func @sparse_print(%arg0: tensor<10x10xf64, #CSR>) {
  sparse_tensor.print %arg0 : tensor<10x10xf64, #CSR>
  return
}

// -----

// CHECK-LABEL:   func.func @sparse_has_runtime() -> i1
// CHECK:           %[[H:.*]] = sparse_tensor.has_runtime_library
// CHECK:           return %[[H]] : i1
func.func @sparse_has_runtime() -> i1 {
  %has_runtime = sparse_tensor.has_runtime_library
  return %has_runtime : i1
}

// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

// CHECK-LABEL:   func.func @sparse_extract_value(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<4x8xf32, #sparse>,
// CHECK-SAME:      %[[VAL_1:.*]]: !sparse_tensor.iterator<#sparse, lvls = 1>) -> f32 {
// CHECK:           %[[VAL_2:.*]] = sparse_tensor.extract_value %[[VAL_0]] at %[[VAL_1]] : tensor<4x8xf32, #sparse>, !sparse_tensor.iterator<#sparse, lvls = 1>
// CHECK:           return %[[VAL_2]] : f32
// CHECK:         }
func.func @sparse_extract_value(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 1>) -> f32 {
  %f = sparse_tensor.extract_value %sp at %it1 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 1>
  return %f : f32
}


// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

// CHECK-LABEL:   func.func @sparse_extract_iter_space(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<4x8xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME:      %[[VAL_1:.*]]: !sparse_tensor.iterator<#sparse{{[0-9]*}}, lvls = 0>)
// CHECK:           %[[VAL_2:.*]] = sparse_tensor.extract_iteration_space %[[VAL_0]] lvls = 0
// CHECK:           %[[VAL_3:.*]] = sparse_tensor.extract_iteration_space %[[VAL_0]] at %[[VAL_1]] lvls = 1
// CHECK:           return %[[VAL_2]], %[[VAL_3]] : !sparse_tensor.iter_space<#sparse{{[0-9]*}}, lvls = 0>, !sparse_tensor.iter_space<#sparse{{[0-9]*}}, lvls = 1>
// CHECK:         }
func.func @sparse_extract_iter_space(%sp : tensor<4x8xf32, #COO>, %it1 : !sparse_tensor.iterator<#COO, lvls = 0>)
  -> (!sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>) {
  // Extracting the iteration space for the first level needs no parent iterator.
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
  // Extracting the iteration space for the second level needs a parent iterator.
  %l2 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 1 : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0>
                                                                 -> !sparse_tensor.iter_space<#COO, lvls = 1>
  return %l1, %l2 : !sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>
}


// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>

// CHECK-LABEL:   func.func @sparse_iterate(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<4x8xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME:      %[[VAL_1:.*]]: index,
// CHECK-SAME:      %[[VAL_2:.*]]: index) -> index {
// CHECK:           %[[VAL_3:.*]] = sparse_tensor.extract_iteration_space %[[VAL_0]] lvls = 0 : tensor<4x8xf32, #sparse{{[0-9]*}}>
// CHECK:           %[[VAL_4:.*]] = sparse_tensor.iterate %[[VAL_5:.*]] in %[[VAL_3]] at(%[[VAL_6:.*]]) iter_args(%[[VAL_7:.*]] = %[[VAL_1]]) : !sparse_tensor.iter_space<#sparse{{[0-9]*}}, lvls = 0> -> index {
// CHECK:             sparse_tensor.yield %[[VAL_7]] : index
// CHECK:           }
// CHECK:           return %[[VAL_4]] : index
// CHECK:         }
func.func @sparse_iterate(%sp : tensor<4x8xf32, #COO>, %i : index, %j : index) -> index {
  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
  %r1 = sparse_tensor.iterate %it1 in %l1 at (%crd) iter_args(%outer = %i): !sparse_tensor.iter_space<#COO, lvls = 0 to 1> -> index {
    sparse_tensor.yield %outer : index
  }
  return %r1 : index
}


// -----

#COO = #sparse_tensor.encoding<{
  map = (i, j) -> (
    i : compressed(nonunique),
    j : singleton(soa)
  )
}>


// CHECK-LABEL:   func.func @sparse_coiteration(
// CHECK-SAME:      %[[SP1:.*]]: !sparse_tensor.iter_space<#sparse, lvls = 0>,
// CHECK-SAME:      %[[SP2:.*]]: !sparse_tensor.iter_space<#sparse, lvls = 1>) -> index {
// CHECK:           %[[INIT:.*]] = arith.constant 0 : index
// CHECK:           %[[RET:.*]] = sparse_tensor.coiterate (%[[SP1]], %[[SP2]]) at(%[[COORD:.*]]) iter_args(%[[ARG:.*]] = %[[INIT]])
// CHECK:           case %[[VAL_6:.*]], _ {
// CHECK:             sparse_tensor.yield %[[ARG]] : index
// CHECK:           }
// CHECK:           return %[[RET]] : index
// CHECK:         }
func.func @sparse_coiteration(%sp1 : !sparse_tensor.iter_space<#COO, lvls = 0>,
                              %sp2 : !sparse_tensor.iter_space<#COO, lvls = 1>) -> index {
  %init = arith.constant 0 : index
  %ret = sparse_tensor.coiterate (%sp1, %sp2) at (%coord) iter_args(%arg = %init)
       : (!sparse_tensor.iter_space<#COO, lvls = 0>, !sparse_tensor.iter_space<#COO, lvls = 1>)
       -> index
  case %it1, _ {
    sparse_tensor.yield %arg : index
  }
  return %ret : index
}