1//===- SparseTensorOps.td - Sparse tensor dialect ops ------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SPARSETENSOR_OPS
10#define SPARSETENSOR_OPS
11
12include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td"
13include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
14include "mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td"
15include "mlir/Dialect/SparseTensor/IR/SparseTensorInterfaces.td"
16include "mlir/Interfaces/InferTypeOpInterface.td"
17include "mlir/Interfaces/SideEffectInterfaces.td"
18include "mlir/Interfaces/ControlFlowInterfaces.td"
19include "mlir/Interfaces/LoopLikeInterface.td"
20
21//===----------------------------------------------------------------------===//
22// Base class.
23//===----------------------------------------------------------------------===//
24
25class SparseTensor_Op<string mnemonic, list<Trait> traits = []>
26  : Op<SparseTensor_Dialect, mnemonic, traits>;
27
28//===----------------------------------------------------------------------===//
29// Sparse Tensor Operations.
30//===----------------------------------------------------------------------===//
31
32def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]> {
33  string summary = "Materializes a new sparse tensor from given source";
34  string description = [{
35    Materializes a sparse tensor with contents taken from an opaque pointer
36    provided by `source`. For targets that have access to a file system,
37    for example, this pointer may be a filename (or file) of a sparse
38    tensor in a particular external storage format. The form of the operation
39    is kept deliberately very general to allow for alternative implementations
40    in the future, such as pointers to buffers or runnable initialization
41    code. The operation is provided as an anchor that materializes a properly
42    typed sparse tensor with initial contents into a computation.
43
44    Reading in a symmetric matrix will result in just the lower/upper triangular
45    part of the matrix (so that only relevant information is stored). Proper
46    symmetry support for operating on symmetric matrices is still TBD.
47
48    Example:
49
50    ```mlir
51    sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR>
52    ```
53  }];
54
55  let arguments = (ins AnyType:$source);
56  let results = (outs AnySparseTensor:$result);
57  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
58}
59
60def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]> {
61  let summary = "Returns a sparse tensor assembled from the given levels and values";
62  let description = [{
63    Assembles the per-level position and coordinate arrays together with
64    the values arrays into a sparse tensor. The order and types of the
65    provided levels must be consistent with the actual storage layout of
66    the returned sparse tensor described below.
67
68    - `levels: [tensor<? x iType>, ...]`
69      supplies the sparse tensor position and coordinate arrays
70      of the sparse tensor for the corresponding level as specified by
71      `sparse_tensor::StorageLayout`.
72    - `values : tensor<? x V>`
73      supplies the values array for the stored elements in the sparse tensor.
74
75    This operation can be used to assemble a sparse tensor from an
76    external source; e.g., by passing numpy arrays from Python. It
77    is the user's responsibility to provide input that can be correctly
78    interpreted by the sparsifier, which does not perform any sanity
79    test to verify data integrity.
80
81    Example:
82
83    ```mlir
84    %pos    = arith.constant dense<[0, 3]>                : tensor<2xindex>
85    %index  = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex>
86    %values = arith.constant dense<[ 1.1,   2.2,   3.3 ]> : tensor<3xf64>
87    %s = sparse_tensor.assemble (%pos, %index), %values
88       : (tensor<2xindex>, tensor<3x2xindex>), tensor<3xf64> to tensor<3x4xf64, #COO>
89    // yields COO format |1.1, 0.0, 0.0, 0.0|
90    //     of 3x4 matrix |0.0, 0.0, 2.2, 3.3|
91    //                   |0.0, 0.0, 0.0, 0.0|
92    ```
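
    A hedged sketch of assembling a CSR matrix instead, where the levels
    consist of a positions buffer and a coordinates buffer for the single
    compressed level (the `#CSR` encoding and the concrete contents below
    are illustrative assumptions):

    ```mlir
    // #CSR = #sparse_tensor.encoding<{ map = (i, j) -> (i : dense, j : compressed) }>
    %pos    = arith.constant dense<[0, 1, 3]>       : tensor<3xindex>
    %crd    = arith.constant dense<[2, 0, 3]>       : tensor<3xindex>
    %values = arith.constant dense<[1.1, 2.2, 3.3]> : tensor<3xf64>
    %csr = sparse_tensor.assemble (%pos, %crd), %values
         : (tensor<3xindex>, tensor<3xindex>), tensor<3xf64> to tensor<2x4xf64, #CSR>
    // yields CSR format |0.0, 0.0, 1.1, 0.0|
    //     of 2x4 matrix |2.2, 0.0, 0.0, 3.3|
    ```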
93  }];
94
95  let arguments = (ins Variadic<RankedTensorOf<[AnySignlessIntegerOrIndex]>>:$levels,
96                       RankedTensorOf<[AnyType]>:$values);
97  let results = (outs AnySparseTensor: $result);
98  let assemblyFormat =
99    "` ` `(` $levels       `)` `,` $values attr-dict `:`"
100    "    `(` type($levels) `)` `,` type($values) `to` type($result)";
101
102  let hasVerifier = 1;
103}
104
105def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVariadicResultSize]> {
106  let summary = "Copies the levels and values of the given sparse tensor";
107  let description = [{
108    The disassemble operation is the inverse of `sparse_tensor::assemble`.
109    It copies the per-level position and coordinate arrays together with
110    the values array of the given sparse tensor into the user-supplied buffers
111    along with the actual length of the memory used in each returned buffer.
112
113    This operation can be used for returning a disassembled MLIR sparse tensor;
114    e.g., copying the sparse tensor contents into pre-allocated numpy arrays
115    back to Python. It is the user's responsibility to allocate large enough
116    buffers of the appropriate types to hold the sparse tensor contents.
117    The sparsifier simply copies all fields of the sparse tensor into the
118    user-supplied buffers without any sanity test to verify data integrity.
119
120    Example:
121
122    ```mlir
123    // input COO format |1.1, 0.0, 0.0, 0.0|
124    //    of 3x4 matrix |0.0, 0.0, 2.2, 3.3|
125    //                  |0.0, 0.0, 0.0, 0.0|
126    %p, %c, %v, %p_len, %c_len, %v_len =
127      sparse_tensor.disassemble %s : tensor<3x4xf64, #COO>
128         out_lvls(%op, %oi : tensor<2xindex>, tensor<3x2xindex>)
129         out_vals(%od : tensor<3xf64>) ->
130           (tensor<2xindex>, tensor<3x2xindex>), tensor<3xf64>, (index, index), index
131    // %p = arith.constant dense<[ 0,              3 ]> : tensor<2xindex>
132    // %c = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex>
133    // %v = arith.constant dense<[ 1.1,   2.2,   3.3 ]> : tensor<3xf64>
134    // %p_len = 2
135    // %c_len = 6 (3x2)
136    // %v_len = 3
137    ```
138  }];
139
140  let arguments = (ins AnySparseTensor:$tensor,
141                       Variadic<RankedTensorOf<[AnySignlessIntegerOrIndex]>>:$out_levels,
142                       RankedTensorOf<[AnyType]>:$out_values);
143  let results = (outs Variadic<RankedTensorOf<[AnySignlessIntegerOrIndex]>>:$ret_levels,
144                      RankedTensorOf<[AnyType]>:$ret_values,
145                      Variadic<AnyIndexingScalarLike>:$lvl_lens,
146                      AnyIndexingScalarLike:$val_len);
147  let assemblyFormat =
148    "$tensor attr-dict `:` type($tensor)"
149    "`out_lvls` `(` $out_levels `:` type($out_levels) `)` "
150    "`out_vals` `(` $out_values `:` type($out_values) `)` `->`"
151    "`(` type($ret_levels) `)` `,` type($ret_values) `,` "
152    "`(` type($lvl_lens)   `)` `,` type($val_len)";
153
154  let hasVerifier = 1;
155}
156
157def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
158  [Pure, StageWithSortSparseOpInterface]> {
159  string summary = "Converts between different tensor types";
160  string description = [{
161    Converts one sparse or dense tensor type to another tensor type. The rank
162    of the source and destination types must match exactly, and the dimension
163    sizes must either match exactly or relax from a static to a dynamic size.
164    The sparse encoding of the two types can obviously be completely different.
165    The name `convert` was preferred over `cast`, since the operation may incur
166    a non-trivial cost.
167
168    When converting between two different sparse tensor types, only explicitly
169    stored values are moved from one underlying sparse storage format to
170    the other. When converting from an unannotated dense tensor type to a
171    sparse tensor type, an explicit test for nonzero values is used. When
172    converting to an unannotated dense tensor type, implicit zeroes in the
173    sparse storage format are made explicit. Note that the conversions can have
174    non-trivial costs associated with them, since they may involve elaborate
175    data structure transformations. Also, conversions from sparse tensor types
176    into dense tensor types may be infeasible in terms of storage requirements.
177
178    A trivial dense-to-dense convert will be removed by canonicalization, while
179    a trivial sparse-to-sparse convert will be removed by the sparse codegen. This
180    is because a trivial sparse-to-sparse convert is used to tell bufferization
181    that the sparse codegen will expand the tensor buffer into sparse tensor
182    storage.
183
184    Examples:
185
186    ```mlir
187    %0 = sparse_tensor.convert %a : tensor<32x32xf32> to tensor<32x32xf32, #CSR>
188    %1 = sparse_tensor.convert %a : tensor<32x32xf32> to tensor<?x?xf32, #CSR>
189    %2 = sparse_tensor.convert %b : tensor<8x8xi32, #CSC> to tensor<8x8xi32, #CSR>
190    %3 = sparse_tensor.convert %c : tensor<4x8xf64, #CSR> to tensor<4x?xf64, #CSC>
191
192    // The following conversion is not allowed (since it would require a
193    // runtime assertion that the source's dimension size is actually 100).
194    %4 = sparse_tensor.convert %d : tensor<?xf64> to tensor<100xf64, #SV>
195    ```
196
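    As a further sketch, converting back to an unannotated dense type makes the
    implicit zeros explicit (assuming a `#CSR`-annotated input; note that such a
    conversion may be infeasible in terms of storage for large sparse tensors):

    ```mlir
    %5 = sparse_tensor.convert %e : tensor<8x8xf64, #CSR> to tensor<8x8xf64>
    ```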
197  }];
198
199  let arguments = (ins AnyRankedTensor:$source);
200  let results = (outs AnyRankedTensor:$dest);
201  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
202
203  let extraClassDeclaration = [{
204     // Whether the convert can be done by a single step or it would require
205     // an extra sort. Inherited from StageWithSortSparseOpInterface.
206     bool needsExtraSort();
207  }];
208
209  let hasFolder = 1;
210  let hasVerifier = 1;
211}
212
213def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map",
214    [NoMemoryEffect]> {
215  let summary = "Reinterprets the dimension/level maps of the source tensor";
216  let description = [{
217    Reinterprets the dimension-to-level and level-to-dimension map specified in
218    `source` according to the type of `dest`.
219    `reinterpret_map` is a no-op and is introduced merely to resolve type conflicts.
220    It does not make any modification to the source tensor and source/dest tensors
221    are considered to be aliases.
222
223    The `source` and `dest` tensors are "reinterpretable" if and only if they have
224    exactly the same storage at a low level.
225    That is, both `source` and `dest` have the same number of levels and level types,
226    and their shapes are consistent before and after `reinterpret_map`.
227
228    Example:
229    ```mlir
230    #CSC = #sparse_tensor.encoding<{
231      map = (d0, d1) -> (d1: dense, d0: compressed)
232    }>
233    #CSR = #sparse_tensor.encoding<{
234      map = (d0, d1) -> (d0: dense, d1: compressed)
235    }>
236    %t1 = sparse_tensor.reinterpret_map %t0 : tensor<3x4xi32, #CSC> to tensor<4x3xi32, #CSR>
237
238    #BSR = #sparse_tensor.encoding<{
239      map = ( i, j ) -> ( i floordiv 2 : dense,
240                          j floordiv 3 : compressed,
241                          i mod 2      : dense,
242                          j mod 3      : dense
243      )
244    }>
245    #DSDD = #sparse_tensor.encoding<{
246      map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
247    }>
248    %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR> to tensor<3x4x2x3xi32, #DSDD>
249    ```
250    }];
251
252  let arguments = (ins AnySparseTensor:$source);
253  let results = (outs AnySparseTensor:$dest);
254  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
255
256  let builders = [
257    OpBuilder<(ins "SparseTensorEncodingAttr":$dstEnc, "Value":$source)>
258  ];
259
260  let hasFolder = 1;
261  let hasVerifier = 1;
262}
263
264def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
265      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
266  let summary = "Extracts the `level`-th positions array of the `tensor`";
267  let description = [{
268    Returns the positions array of the tensor's storage at the given
269    level.  This is similar to the `bufferization.to_memref` operation
270    in the sense that it provides a bridge between a tensor world view
271    and a bufferized world view.  Unlike the `bufferization.to_memref`
272    operation, however, this sparse operation actually lowers into code
273    that extracts the positions array from the sparse storage itself
274    (either by calling a support library or through direct code).
275
276    Writing into the result of this operation is undefined behavior.
277
278    Example:
279
280    ```mlir
281    %1 = sparse_tensor.positions %0 { level = 1 : index }
282       : tensor<64x64xf64, #CSR> to memref<?xindex>
283    ```
284  }];
285
286  let arguments = (ins AnySparseTensor:$tensor, LevelAttr:$level);
287  let results = (outs AnyNon0RankedMemRef:$result);
288  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
289
290  let hasVerifier = 1;
291}
292
293def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
294      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
295  let summary = "Extracts the `level`-th coordinates array of the `tensor`";
296  let description = [{
297    Returns the coordinates array of the tensor's storage at the given
298    level.  This is similar to the `bufferization.to_memref` operation
299    in the sense that it provides a bridge between a tensor world view
300    and a bufferized world view.  Unlike the `bufferization.to_memref`
301    operation, however, this sparse operation actually lowers into code
302    that extracts the coordinates array from the sparse storage itself
303    (either by calling a support library or through direct code).
304
305    Writing into the result of this operation is undefined behavior.
306
307    Example:
308
309    ```mlir
310    %1 = sparse_tensor.coordinates %0 { level = 1 : index }
311       : tensor<64x64xf64, #CSR> to memref<?xindex>
312    ```
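
    Continuing the hedged 3x4 CSR sketch used for `sparse_tensor.positions`,
    the level-1 coordinates array holds the column coordinate of each stored
    entry, in the same order as the values array:

    ```mlir
    // For the same 3x4 CSR matrix, the level-1 coordinates array is [0, 2, 1, 3].
    %crd = sparse_tensor.coordinates %m { level = 1 : index }
       : tensor<3x4xf64, #CSR> to memref<?xindex>
    ```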
313  }];
314
315  let arguments = (ins AnySparseTensor:$tensor, LevelAttr:$level);
316  let results = (outs AnyNon0RankedMemRef:$result);
317  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
318
319  let hasVerifier = 1;
320}
321
322def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
323      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
324  let summary = "Extracts the linear coordinates array from a tensor";
325  let description = [{
326    Returns the linear coordinates array for a sparse tensor with
327    a trailing COO region with at least two levels.  It is an error
328    if the tensor doesn't contain such a COO region.  This is similar
329    to the `bufferization.to_memref` operation in the sense that it
330    provides a bridge between a tensor world view and a bufferized
331    world view.  Unlike the `bufferization.to_memref` operation,
332    however, this operation actually lowers into code that extracts
333    the linear coordinates array from the sparse storage scheme that
334    stores the coordinates for the COO region as an array of structures.
335    For example, a 2D COO sparse tensor with two non-zero elements at
336    coordinates (1, 3) and (4, 6) is stored in a single linear buffer as
337    (1, 3, 4, 6) instead of in two buffers as (1, 4) and (3, 6).
338
339    Writing into the result of this operation is undefined behavior.
340
341    Example:
342
343    ```mlir
344    %1 = sparse_tensor.coordinates_buffer %0
345       : tensor<64x64xf64, #COO> to memref<?xindex>
346    ```
347  }];
348
349  let arguments = (ins AnySparseTensor:$tensor);
350  let results = (outs AnyNon0RankedMemRef:$result);
351  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
352
353  let hasVerifier = 1;
354}
355
356def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
357      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
358  let summary = "Extracts numerical values array from a tensor";
359  let description = [{
360    Returns the values array of the sparse storage format for the given
361    sparse tensor, independent of the actual dimension. This is similar to
362    the `bufferization.to_memref` operation in the sense that it provides a bridge
363    between a tensor world view and a bufferized world view. Unlike the
364    `bufferization.to_memref` operation, however, this sparse operation actually
365    lowers into code that extracts the values array from the sparse storage
366    scheme (either by calling a support library or through direct code).
367
368    Writing into the result of this operation is undefined behavior.
369
370    Example:
371
372    ```mlir
373    %1 = sparse_tensor.values %0 : tensor<64x64xf64, #CSR> to memref<?xf64>
374    ```
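
    Continuing the hedged 3x4 CSR sketch used for `sparse_tensor.positions`,
    the values array simply holds the stored entries in storage order:

    ```mlir
    // For the same 3x4 CSR matrix, the values array is [1.0, 2.0, 3.0, 4.0].
    %v = sparse_tensor.values %m : tensor<3x4xf64, #CSR> to memref<?xf64>
    ```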
375  }];
376
377  let arguments = (ins AnySparseTensor:$tensor);
378  let results = (outs AnyNon0RankedMemRef:$result);
379  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
380
381  let hasVerifier = 1;
382}
383
384def SparseTensor_NumberOfEntriesOp : SparseTensor_Op<"number_of_entries", [Pure]> {
385  let summary = "Returns the number of entries that are stored in the tensor.";
386  let description = [{
387    Returns the number of entries that are stored in the given sparse tensor.
388    Note that this is typically the number of nonzero elements in the tensor,
389    but since explicit zeros may appear in the storage formats, the more
390    accurate nomenclature is used.
391
392    Example:
393
394    ```mlir
395    %noe = sparse_tensor.number_of_entries %tensor : tensor<64x64xf64, #CSR>
396    ```
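
    As a hedged illustration, for the 3x4 CSR sketch used for
    `sparse_tensor.positions` there are four stored entries, regardless of
    whether any of them happen to be numerically zero:

    ```mlir
    %noe = sparse_tensor.number_of_entries %m : tensor<3x4xf64, #CSR>
    // %noe = 4
    ```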
397  }];
398
399  let arguments = (ins AnySparseTensor:$tensor);
400  let results = (outs Index:$result);
401  let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
402}
403
404def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate",
405      [Pure, StageWithSortSparseOpInterface]> {
406  let summary = "Concatenates a list of tensors into a single tensor.";
407  let description = [{
408     Concatenates a list of input tensors into a single output tensor with the same
409     dimension-rank.  The concatenation happens on the specified `dimension`
410     (0 <= dimension < dimRank).  The resulting `dimension` size is the
411     sum of all the input sizes for that dimension, while all the other
412     dimensions should have the same size in the input and output tensors.
413
414     Only statically-sized input tensors are accepted, while the output tensor
415     can be dynamically-sized.
416
417     Example:
418
419     ```mlir
420     %0 = sparse_tensor.concatenate %1, %2 { dimension = 0 : index }
421       : tensor<64x64xf64, #CSR>, tensor<64x64xf64, #CSR> to tensor<128x64xf64, #CSR>
422     ```
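
     A sketch of concatenating along the innermost dimension instead (the
     shapes are illustrative assumptions):

     ```mlir
     %0 = sparse_tensor.concatenate %1, %2 { dimension = 1 : index }
       : tensor<64x64xf64, #CSR>, tensor<64x32xf64, #CSR> to tensor<64x96xf64, #CSR>
     ```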
423   }];
424
425  let extraClassDeclaration = [{
426     // Whether the concatenate can be done by a single step or it would require
427     // an extra sort. Inherited from StageWithSortSparseOpInterface.
428     bool needsExtraSort();
429  }];
430
431  let arguments = (ins Variadic<AnyRankedTensor>:$inputs, DimensionAttr:$dimension);
432  let results = (outs AnyRankedTensor:$result);
433  let assemblyFormat = "$inputs attr-dict `:` type($inputs) `to` type($result)";
434
435  let hasVerifier = 1;
436}
437
438def SparseTensor_ToSliceOffsetOp : SparseTensor_Op<"slice.offset", [Pure]> {
439  let summary = "Extracts the offset of the sparse tensor slice at the given dimension";
440  let description = [{
441    Extracts the offset of the sparse tensor slice at the given dimension.
442
443    Currently, sparse tensor slices are still a work in progress, and only
444    work when the runtime library is disabled (i.e., running the sparsifier
445    with `enable-runtime-library=false`).
446
447    Example:
448
449    ```mlir
450    %0 = tensor.extract_slice %s[%v1, %v2][64, 64][1, 1] : tensor<128x128xf64, #DCSR>
451                                                        to tensor<64x64xf64, #Slice>
452
453    %1 = sparse_tensor.slice.offset %0 at 0 : tensor<64x64xf64, #Slice>
454    %2 = sparse_tensor.slice.offset %0 at 1 : tensor<64x64xf64, #Slice>
455    // %1 = %v1
456    // %2 = %v2
457    ```
458  }];
459
460  let arguments = (ins AnySparseTensorSlice:$slice, IndexAttr:$dim);
461  let results = (outs Index:$offset);
462  let assemblyFormat = "$slice `at` $dim attr-dict `:` type($slice)";
463
464  let hasVerifier = 1;
465}
466
467def SparseTensor_ToSliceStrideOp : SparseTensor_Op<"slice.stride", [Pure]> {
468  let summary = "Extracts the stride of the sparse tensor slice at the given dimension";
469  let description = [{
470    Extracts the stride of the sparse tensor slice at the given dimension.
471
472    Currently, sparse tensor slices are still a work in progress, and only
473    work when the runtime library is disabled (i.e., running the sparsifier
474    with `enable-runtime-library=false`).
475
476    Example:
477
478    ```mlir
479    %0 = tensor.extract_slice %s[%v1, %v2][64, 64][%s1, %s2] : tensor<128x128xf64, #DCSR>
480                                                            to tensor<64x64xf64, #Slice>
481
482    %1 = sparse_tensor.slice.stride %0 at 0 : tensor<64x64xf64, #Slice>
483    %2 = sparse_tensor.slice.stride %0 at 1 : tensor<64x64xf64, #Slice>
484    // %1 = %s1
485    // %2 = %s2
486
487    ```
488  }];
489
490  let arguments = (ins AnySparseTensorSlice:$slice, IndexAttr:$dim);
491  let results = (outs Index:$stride);
492  let assemblyFormat = "$slice `at` $dim attr-dict `:` type($slice)";
493
494  let hasVerifier = 1;
495}
496
497//===----------------------------------------------------------------------===//
498// Sparse Tensor Storage Specifier Operations.
499//===----------------------------------------------------------------------===//
500
501def SparseTensor_StorageSpecifierInitOp : SparseTensor_Op<"storage_specifier.init",
502      [Pure]> {
503  let summary = "Returns an initial storage specifier value";
504  let description = [{
505    Returns an initial storage specifier value.  A storage specifier
506    value holds the level-sizes, position arrays, coordinate arrays,
507    and the value array.
508    If this is a specifier for slices, it also holds the extra strides/offsets
509    for each tensor dimension.
510
511    TODO: The sparse tensor slice support is currently in an unstable state, and
512    is subject to change in the future.
513
514    Example:
515
516    ```mlir
517    #CSR = #sparse_tensor.encoding<{
518      map = (i, j) -> (i : dense, j : compressed)
519    }>
520    #CSR_SLICE = #sparse_tensor.encoding<{
521      map = (d0 : #sparse_tensor<slice(1, 4, 1)>,
522             d1 : #sparse_tensor<slice(1, 4, 2)>) ->
523            (d0 : dense, d1 : compressed)
524    }>
525
526    %0 = sparse_tensor.storage_specifier.init :  !sparse_tensor.storage_specifier<#CSR>
527    %1 = sparse_tensor.storage_specifier.init with %src
528         : !sparse_tensor.storage_specifier<#CSR> to
529           !sparse_tensor.storage_specifier<#CSR_SLICE>
530    ```
531  }];
532
533  let arguments = (ins Optional<SparseTensorStorageSpecifier>:$source);
534  let results = (outs SparseTensorStorageSpecifier:$result);
535  let assemblyFormat = "attr-dict (`with` $source^)? `:` (`from` qualified(type($source))^ `to`)?"
536                                                        " qualified(type($result))";
537  let builders = [
538    OpBuilder<(ins "Type":$result),
539    [{
540      build($_builder, $_state, result, Value());
541    }]>
542  ];
543
544
545}
546
547def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get", [Pure]> {
548  let summary = "Returns the requested field of the given storage specifier";
549  let description = [{
550    Returns the requested field of the given storage_specifier.
551
552    Example of querying the size of the coordinates array for level 0:
553
554    ```mlir
555    %0 = sparse_tensor.storage_specifier.get %arg0 crd_mem_sz at 0
556         : !sparse_tensor.storage_specifier<#COO>
557    ```
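
    A further hedged sketch querying a level size (assuming `lvl_sz` as the
    specifier kind; kinds that are not per-level, such as `val_mem_sz`, omit
    the `at` clause):

    ```mlir
    %1 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 1
         : !sparse_tensor.storage_specifier<#COO>
    ```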
558  }];
559
560  let arguments = (ins SparseTensorStorageSpecifier:$specifier,
561                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
562                   OptionalAttr<LevelAttr>:$level);
563  let results = (outs Index:$result);
564  let assemblyFormat = "$specifier $specifierKind (`at` $level^)? attr-dict"
565                       "`:` qualified(type($specifier))";
566
567  let hasVerifier = 1;
568  let hasFolder = 1;
569}
570
571def SparseTensor_SetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.set",
572    [Pure, AllTypesMatch<["result", "specifier"]>]> {
573  let summary = "Sets the requested field of the given storage specifier";
574  let description = [{
575    Sets the field of the storage specifier to the given input value. Returns
576    the updated storage_specifier as a new SSA value.
577
578    Example of updating the size of the coordinates array for level 0:
579
580    ```mlir
581    %0 = sparse_tensor.storage_specifier.set %arg0 crd_mem_sz at 0 with %new_sz
582       : !sparse_tensor.storage_specifier<#COO>
583    ```
584  }];
585
586  let arguments = (ins SparseTensorStorageSpecifier:$specifier,
587                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
588                   OptionalAttr<LevelAttr>:$level,
589                   Index:$value);
590  let results = (outs SparseTensorStorageSpecifier:$result);
591  let assemblyFormat = "$specifier $specifierKind (`at` $level^)? `with` $value"
592                       " attr-dict `:` qualified(type($result))";
593
594  let hasVerifier = 1;
595}
596
597//===----------------------------------------------------------------------===//
598// Sparse Tensor Coordinate Operations.
599//===----------------------------------------------------------------------===//
600
601def SparseTensor_LvlOp : SparseTensor_Op<"lvl", [ConditionallySpeculatable, NoMemoryEffect]> {
602  let summary = "level index operation";
603  let description = [{
604    The `sparse_tensor.lvl` operation behaves similarly to the `tensor.dim` operation.
605    It takes a sparse tensor and a level operand of type `index` and returns
606    the size of the requested level of the given sparse tensor.
607    If the sparse tensor has an identity dimension-to-level mapping, it returns
608    the same result as `tensor.dim`.
609    If the level index is out of bounds, the behavior is undefined.
610
611    Example:
612
613    ```mlir
614    #BSR = #sparse_tensor.encoding<{
615      map = ( i, j ) ->
616        ( i floordiv 2 : dense,
617          j floordiv 3 : compressed,
618          i mod 2      : dense,
619          j mod 3      : dense
620        )
621    }>
622
623    // Always returns 2 (4 floordiv 2), can be constant folded:
624    %c0 = arith.constant 0 : index
625    %x = sparse_tensor.lvl %A, %c0 : tensor<4x?xf32, #BSR>
626
627    // Returns the dynamic size of level 1 of %A (computed from j floordiv 3):
628    %c1 = arith.constant 1 : index
629    %y = sparse_tensor.lvl %A, %c1 : tensor<4x?xf32, #BSR>
630
631    // Always returns 3 (since j mod 3 < 3), can be constant folded:
632    %c3 = arith.constant 3 : index
633    %z = sparse_tensor.lvl %A, %c3 : tensor<4x?xf32, #BSR>
634    ```
635  }];
636
637  let arguments = (ins AnySparseTensor:$source, Index:$index);
638  let results = (outs Index:$result);
639  let assemblyFormat = "attr-dict $source `,` $index `:` type($source) ";
640
641  let builders = [
642    OpBuilder<(ins "Value":$source, "int64_t":$index)>
643  ];
644
645  let extraClassDeclaration = [{
646    /// Helper function to get the index as a simple integer if it is constant.
647    std::optional<uint64_t> getConstantLvlIndex();
648
649    /// Interface method for ConditionallySpeculatable.
650    Speculation::Speculatability getSpeculatability();
651  }];
652
653  let hasVerifier = 1;
654  let hasFolder = 1;
655}
656
657def SparseTensor_CrdTranslateOp : SparseTensor_Op<"crd_translate", [Pure]> {
658  string summary = "Performs coordinate translation between level and dimension coordinate space.";
659  string description = [{
660    Performs coordinate translation between level and dimension coordinate spaces according
661    to the affine maps defined by $encoder.
662
663    Example:
664
665    ```mlir
666    %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%d0, %d1] as #BSR
667                       : index, index, index, index
668    ```
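
    The reverse direction translates level coordinates back into dimension
    coordinates; a hedged sketch for the same `#BSR` map (only the result
    types appear after the colon):

    ```mlir
    %d0, %d1 = sparse_tensor.crd_translate lvl_to_dim [%l0, %l1, %l2, %l3] as #BSR
             : index, index
    ```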
669  }];
670
671  let arguments = (ins Variadic<Index>:$in_crds,
672                   SparseTensorCrdTransDirectionAttr:$direction,
673                   SparseTensorEncodingAttr:$encoder);
674  let results = (outs Variadic<Index>:$out_crds);
675  let assemblyFormat = "$direction `[` $in_crds `]` `as` $encoder attr-dict `:` type($out_crds)";
676
677  let hasVerifier = 1;
678  let hasFolder = 1;
679}
680
681//===----------------------------------------------------------------------===//
682// Sparse Tensor Management Operations. These operations are "impure" in the
683// sense that some behavior is defined by side-effects. These operations provide
684// a bridge between "sparsification" on one hand and a support library or actual
685// code generation on the other hand. The semantics of these operations may be
686// refined over time as our sparse abstractions evolve.
687//===----------------------------------------------------------------------===//
688
689def SparseTensor_PushBackOp : SparseTensor_Op<"push_back",
690    [TypesMatchWith<"value type matches element type of inBuffer",
691                    "inBuffer", "value",
692                    "::llvm::cast<ShapedType>($_self).getElementType()">,
693     AllTypesMatch<["inBuffer", "outBuffer"]>]> {
694  string summary = "Pushes a value to the back of a given buffer";
695  string description = [{
696    Pushes `value` to the end of the given sparse tensor storage buffer
697    `inBuffer` as indicated by the value of `curSize` and returns the
698    new size of the buffer in `newSize` (`newSize = curSize + n`).
699    The capacity of the buffer is recorded in the memref type of `inBuffer`.
700    If the current buffer is full, then `inBuffer.realloc` is called before
701    pushing the data to the buffer. This is similar to std::vector push_back.
702
703    The optional input `n` specifies the number of times to repeatedly push
704    the value to the back of the tensor. When `n` is a compile-time constant,
705    its value can't be less than 1. If `n` is a runtime value that is less
706    than 1, the behavior is undefined. Although using input `n` is semantically
707    equivalent to calling push_back n times, it gives the compiler more chances
708    to optimize the memory reallocation and the filling of the memory with the
709    same value.
710
711    The `inbounds` attribute tells the compiler that the insertion won't go
712    beyond the current storage buffer. This allows the compiler to not generate
713    the code for capacity check and reallocation. The typical usage will be for
714    "dynamic" sparse tensors for which a capacity can be set beforehand.
715
716    Note that this operation is "impure" in the sense that even though
717    the result is modeled through an SSA value, referencing the memref
718    through the old SSA value after this operation is undefined behavior.
719
720    Example:
721
722    ```mlir
723    %buf, %newSize = sparse_tensor.push_back %curSize, %buffer, %val
724       : index, memref<?xf64>, f64
725    ```
726
727    ```mlir
728    %buf, %newSize = sparse_tensor.push_back inbounds %curSize, %buffer, %val
729       : index, memref<?xf64>, f64
730    ```
731
732    ```mlir
733    %buf, %newSize = sparse_tensor.push_back inbounds %curSize, %buffer, %val, %n
734       : index, memref<?xf64>, f64
735    ```
736  }];
737
738  let arguments = (ins Index:$curSize,
739                       StridedMemRefRankOf<[AnyType], [1]>:$inBuffer,
740                       AnyType:$value, Optional<Index>:$n,
741                       UnitAttr:$inbounds);
742  let results = (outs StridedMemRefRankOf<[AnyType], [1]>:$outBuffer,
743                      Index:$newSize);
744  let assemblyFormat = "(`inbounds` $inbounds^)? $curSize `,` $inBuffer"
745                       " `,` $value (`,` $n^ )?  attr-dict `:`"
746                       " type($curSize) `,` type($inBuffer) `,`"
747                       " type($value) (`,` type($n)^ )?";
748
749  let builders = [
750    // Build an op (reusing type from curSize and inBuffer) without input `n`
751    OpBuilder<(ins "Value":$curSize, "Value":$inBuffer, "Value":$value)>
752  ];
753
754  let hasVerifier = 1;
755}
756
757def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []> {
758  string summary = "Expands an access pattern for insertion";
759  string description = [{
760    Performs an access pattern expansion for the innermost levels of the
761    given tensor. This operation is useful to implement kernels in which a
762    sparse tensor appears as output. This technique is known under several
763    different names and using several alternative implementations,
764    for example, phase counter [Gustavson72], expanded or switch array
765    [Pissanetzky84], in phase scan [Duff90], access pattern expansion [Bik96],
766    and workspaces [Kjolstad19].
767
768    The `values` and `filled` arrays must have lengths equal to the
769    level-size of the innermost level (i.e., as if the innermost level
770    were *dense*).  The `added` array and `count` are used to store new
771    level-coordinates when a false value is encountered in the `filled`
772    array.  All arrays should be allocated before the loop (possibly even
773    shared between loops in a future optimization) so that their *dense*
774    initialization can be amortized over many iterations.  Setting and
775    resetting the dense arrays in the loop nest itself is kept *sparse*
776    by only iterating over set elements through an indirection using
777    the added array, so that the operations are kept proportional to
778    the number of nonzeros.
779
780    Note that this operation is "impure" in the sense that even though the
781    results are modeled through SSA values, the operation relies on a proper
782    side-effecting context that sets and resets the expanded arrays.
783
784    Example:
785
786    ```mlir
787    %values, %filled, %added, %count = sparse_tensor.expand %tensor
788      : tensor<4x4xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
789    ```
790  }];
791
792
793  let arguments = (ins AnySparseTensor:$tensor);
794  let results = (outs AnyStridedMemRefOfRank<1>:$values,
795                      StridedMemRefRankOf<[I1],[1]>:$filled,
796                      StridedMemRefRankOf<[Index],[1]>:$added,
797                      Index:$count);
798  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($values)"
799                       " `,` type($filled) `,` type($added)";
800}
801
802def SparseTensor_CompressOp : SparseTensor_Op<"compress",
803    [AllTypesMatch<["tensor", "result"]>]> {
804  string summary = "Compresses an access pattern for insertion";
805  string description = [{
806    Finishes a single access pattern expansion by moving inserted elements
807    into the sparse storage scheme of the given tensor with the given
808    level-coordinates.  The arity of `lvlCoords` is one less than the
809    level-rank of the tensor, with the coordinate of the innermost
810    level defined through the `added` array.  The `values` and `filled`
811    arrays are reset in a *sparse* fashion by only iterating over set
812    elements through an indirection using the `added` array, so that
813    the operations are kept proportional to the number of nonzeros.
814    See the `sparse_tensor.expand` operation for more details.
815
816    Note that this operation is "impure" in the sense that even though
817    the result is modeled through an SSA value, the insertion is eventually
818    done "in place", and referencing the old SSA value is undefined behavior.
819
820    Example:
821
822    ```mlir
823    %result = sparse_tensor.compress %values, %filled, %added, %count into %tensor[%i]
824      : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #CSR>
825    ```
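
    A hedged sketch of how `sparse_tensor.expand` and `sparse_tensor.compress`
    are typically paired around the loop nest that fills one row of a CSR
    output (the loop body and the count updates are elided):

    ```mlir
    %values, %filled, %added, %count = sparse_tensor.expand %tensor
      : tensor<4x4xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
    // ... a loop nest stores into %values/%filled/%added and updates the count ...
    %result = sparse_tensor.compress %values, %filled, %added, %new_count into %tensor[%i]
      : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #CSR>
    ```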
826  }];
827
828  let arguments = (ins AnyStridedMemRefOfRank<1>:$values,
829                   StridedMemRefRankOf<[I1],[1]>:$filled,
830                   StridedMemRefRankOf<[Index],[1]>:$added,
831                   Index:$count,
832                   AnySparseTensor:$tensor,
833                   Variadic<Index>:$lvlCoords);
834  let results = (outs AnySparseTensor:$result);
835  let assemblyFormat = "$values `,` $filled `,` $added `,` $count"
836                       " `into` $tensor `[` $lvlCoords `]` attr-dict"
837                       " `:` type($values) `,` type($filled) `,` type($added)"
838                       " `,` type($tensor)";
839  let hasVerifier = 1;
840}
841
842def SparseTensor_LoadOp : SparseTensor_Op<"load", [SameOperandsAndResultType]> {
843  let summary =
844    "Rematerializes tensor from underlying sparse storage format";
845  let description = [{
846    Rematerializes a tensor from the underlying sparse storage format of the
847    given tensor. This is similar to the `bufferization.to_tensor` operation
848    in the sense that it provides a bridge between a bufferized world view
849    and a tensor world view. Unlike the `bufferization.to_tensor` operation,
850    however, this sparse operation is used only temporarily to maintain a
851    correctly typed intermediate representation during progressive
852    bufferization.
853
854    The `hasInserts` attribute denotes whether insertions to the underlying
855    sparse storage format may have occurred, in which case the underlying
856    sparse storage format needs to be finalized. Otherwise, the operation
857    simply folds away.
858
859    Note that this operation is "impure" in the sense that even though
860    the result is modeled through an SSA value, the operation relies on
861    a proper context of materializing and inserting the tensor value.
862
863    Examples:
864
865    ```mlir
866    %result = sparse_tensor.load %tensor : tensor<8xf64, #SV>
867
868    %1 = sparse_tensor.load %0 hasInserts : tensor<16x32xf32, #CSR>
869    ```
870  }];
871
872  let arguments = (ins AnySparseTensor:$tensor, UnitAttr:$hasInserts);
873  let results = (outs AnyTensor:$result);
874  let assemblyFormat = "$tensor (`hasInserts` $hasInserts^)? attr-dict `:` type($tensor)";
875}
876
877def SparseTensor_OutOp : SparseTensor_Op<"out", []> {
878  string summary = "Outputs a sparse tensor to the given destination";
879  string description = [{
880    Outputs the contents of a sparse tensor to the destination defined by an
881    opaque pointer provided by `dest`. For targets that have access to a file
882    system, for example, this pointer may specify a filename (or file) for output.
883    The form of the operation is kept deliberately very general to allow for
884    alternative implementations in the future, such as sending the contents to
885    a buffer defined by a pointer.
886
887    Note that this operation is "impure" in the sense that its behavior
888    is solely defined by side-effects and not SSA values.
889
890    Example:
891
892    ```mlir
893    sparse_tensor.out %t, %dest : tensor<1024x1024xf64, #CSR>, !Dest
894    ```
895  }];
896
897  let arguments = (ins AnySparseTensor:$tensor, AnyType:$dest);
898  let assemblyFormat = "$tensor `,` $dest attr-dict `:` type($tensor) `,` type($dest)";
899}
900
901//===----------------------------------------------------------------------===//
902// Sparse Tensor Sorting/Ordering Operations.
903//===----------------------------------------------------------------------===//
904
905def SparseTensor_SortOp : SparseTensor_Op<"sort"> {
906  let summary = "Sorts the arrays in xs and ys lexicographically on the "
907                "integral values found in the xs list";
908  let description = [{
909    Sorts the `xs` values along with some `ys` values that are put in a single linear
910    buffer `xy`.  The affine map attribute `perm_map` specifies the permutation to be
911    applied on the `xs` before comparison; the rank of the permutation map
912    also specifies the number of `xs` values in `xy`.
913    The optional index attribute `ny` provides the number of `ys` values in `xy`.
914    When `ny` is not explicitly specified, its value is 0.
915    This operation supports a more efficient way to store the COO definition
916    in the sparse tensor type.
917
918    The buffer `xy` should have a dimension not less than `n * (rank(perm_map) + ny)`, while the
919    buffers in `ys` should have a dimension not less than `n`. The behavior of
920    the operation is undefined if this condition is not met.
921
922    Example:
923
924    ```mlir
925    sparse_tensor.sort insertion_sort_stable %n, %x { perm_map = affine_map<(i,j) -> (j,i)> }
926      : memref<?xindex>
927    ```
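
    A hedged sketch that additionally sorts a values buffer jointly with the
    coordinates (identity permutation assumed):

    ```mlir
    sparse_tensor.sort quick_sort %n, %xy jointly %values
      { perm_map = affine_map<(i,j) -> (i,j)> }
      : memref<?xindex> jointly memref<?xf64>
    ```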
928  }];
929
930  let arguments = (ins Index:$n,
931                       StridedMemRefRankOf<[AnyInteger, Index], [1]>:$xy,
932                       Variadic<StridedMemRefRankOf<[AnyType], [1]>>:$ys,
933                       AffineMapAttr:$perm_map, OptionalAttr<IndexAttr>:$ny,
934                       SparseTensorSortKindAttr:$algorithm);
935  let assemblyFormat = "$algorithm $n"
936                       "`,`$xy (`jointly` $ys^)? attr-dict"
937                       "`:` type($xy) (`jointly` type($ys)^)?";
938  let hasVerifier = 1;
939}
940
941def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]> {
942  let summary = "Reorders the input COO such that it has the same order as "
943                "the output COO";
944  let description = [{
945    Reorders the input COO to the same order as specified by the output format.
946    E.g., reorder an unordered COO into an ordered one.
947
948    The input and result COO tensors must have the same element type, position type, and
949    coordinate type. At the moment, the operation also only supports ordering
950    input and result COO with the same dim2lvl map.
951
952    Example:
953
954    ```mlir
955    %res = sparse_tensor.reorder_coo quick_sort %coo : tensor<?x?xf64, #Unordered_COO> to
956                                                       tensor<?x?xf64, #Ordered_COO>
957
958    ```
959  }];
960
961  let arguments = (ins AnySparseTensor: $input_coo,
962                       SparseTensorSortKindAttr:$algorithm);
963  let results = (outs AnySparseTensor: $result_coo);
964  let assemblyFormat = "$algorithm $input_coo attr-dict"
965                       "`:` type($input_coo) `to` type($result_coo)";
966
967  let hasFolder = 1;
968  let hasVerifier = 1;
969}
970
971//===----------------------------------------------------------------------===//
972// Sparse Tensor Syntax Operations.
973//===----------------------------------------------------------------------===//
974
975def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]> {
976  let summary = "Binary set operation utilized within linalg.generic";
977  let description = [{
978      Defines a computation within a `linalg.generic` operation that takes two
979      operands and executes one of the regions depending on whether both operands
980      or either operand is nonzero (i.e. stored explicitly in the sparse storage
981      format).
982
983      Three regions are defined for the operation and must appear in this order:
984      - overlap (elements present in both sparse tensors)
985      - left (elements only present in the left sparse tensor)
986      - right (elements only present in the right sparse tensor)
987
988      Each region contains a single block describing the computation and result.
989      Every non-empty block must end with a sparse_tensor.yield and the return
990      type must match the type of `output`. The primary region's block has two
991      arguments, while the left and right regions' blocks have only one argument.
992
993      A region may also be declared empty (i.e. `left={}`), indicating that the
994      region does not contribute to the output. For example, setting both
995      `left={}` and `right={}` is equivalent to the intersection of the two
996      inputs as only the overlap region will contribute values to the output.
997
998      As a convenience, there is also a special token `identity` which can be
999      used in place of the left or right region. This token indicates that
1000      the return value is the input value (i.e. func(%x) => return %x).
1001      As a practical example, setting `left=identity` and `right=identity`
1002      would be equivalent to a union operation where non-overlapping values
1003      in the inputs are copied to the output unchanged.
1004
1005      Due to the possibility of empty regions, i.e. lack of a value for certain
1006      cases, the result of this operation may only feed directly into the output
1007      of the `linalg.generic` operation or into a custom reduction
1008      `sparse_tensor.reduce` operation that follows in the same region.
1009
1010      Example of isEqual applied to intersecting elements only:
1011
1012      ```mlir
1013      %C = tensor.empty(...)
1014      %0 = linalg.generic #trait
1015        ins(%A: tensor<?xf64, #SparseVector>,
1016            %B: tensor<?xf64, #SparseVector>)
1017        outs(%C: tensor<?xi8, #SparseVector>) {
1018        ^bb0(%a: f64, %b: f64, %c: i8) :
1019          %result = sparse_tensor.binary %a, %b : f64, f64 to i8
1020            overlap={
1021              ^bb0(%arg0: f64, %arg1: f64):
1022                %cmp = arith.cmpf "oeq", %arg0, %arg1 : f64
1023                %ret_i8 = arith.extui %cmp : i1 to i8
1024                sparse_tensor.yield %ret_i8 : i8
1025            }
1026            left={}
1027            right={}
1028          linalg.yield %result : i8
1029      } -> tensor<?xi8, #SparseVector>
1030      ```
1031
1032      Example of A+B in upper triangle, A-B in lower triangle:
1033
1034      ```mlir
1035      %C = tensor.empty(...)
1036      %1 = linalg.generic #trait
1037        ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xf64, #CSR>)
1038        outs(%C: tensor<?x?xf64, #CSR>) {
1039        ^bb0(%a: f64, %b: f64, %c: f64) :
1040          %row = linalg.index 0 : index
1041          %col = linalg.index 1 : index
1042          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
1043            overlap={
1044              ^bb0(%x: f64, %y: f64):
1045                %cmp = arith.cmpi "uge", %col, %row : index
1046                %upperTriangleResult = arith.addf %x, %y : f64
1047                %lowerTriangleResult = arith.subf %x, %y : f64
1048                %ret = arith.select %cmp, %upperTriangleResult, %lowerTriangleResult : f64
1049                sparse_tensor.yield %ret : f64
1050            }
1051            left=identity
1052            right={
1053              ^bb0(%y: f64):
1054                %cmp = arith.cmpi "uge", %col, %row : index
1055                %lowerTriangleResult = arith.negf %y : f64
1056                %ret = arith.select %cmp, %y, %lowerTriangleResult : f64
1057                sparse_tensor.yield %ret : f64
1058            }
1059          linalg.yield %result : f64
1060      } -> tensor<?x?xf64, #CSR>
1061      ```
1062
1063      Example of set difference. Returns a copy of A where its sparse structure
1064      is *not* overlapped by B. The element type of B can be different than A
1065      because we never use its values, only its sparse structure:
1066
1067      ```mlir
1068      %C = tensor.empty(...)
1069      %2 = linalg.generic #trait
1070        ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xi32, #CSR>)
1071        outs(%C: tensor<?x?xf64, #CSR>) {
1072        ^bb0(%a: f64, %b: i32, %c: f64) :
1073          %result = sparse_tensor.binary %a, %b : f64, i32 to f64
1074            overlap={}
1075            left=identity
1076            right={}
1077          linalg.yield %result : f64
1078      } -> tensor<?x?xf64, #CSR>
1079      ```
1080  }];
1081
1082  let regions = (region AnyRegion:$overlapRegion, AnyRegion:$leftRegion, AnyRegion:$rightRegion);
1083  let arguments = (ins AnyType:$x, AnyType:$y, UnitAttr:$left_identity, UnitAttr:$right_identity);
1084  let results = (outs AnyType:$output);
1085  let assemblyFormat = [{
1086        $x `,` $y `:` attr-dict type($x) `,` type($y) `to` type($output) `\n`
1087        `overlap` `=` $overlapRegion `\n`
1088        `left` `=` (`identity` $left_identity^):($leftRegion)? `\n`
1089        `right` `=` (`identity` $right_identity^):($rightRegion)?
1090  }];
1091
1092  let hasVerifier = 1;
1093}
1094
1095def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]> {
1096
1097  let arguments = (ins AnyType:$x);
1098
1099  let results = (outs AnyType:$output);
1100
1101  let summary = "Unary set operation utilized within linalg.generic";
1102  let description = [{
1103      Defines a computation with a `linalg.generic` operation that takes a single
1104      operand and executes one of two regions depending on whether the operand is
1105      nonzero (i.e. stored explicitly in the sparse storage format).
1106
1107      Two regions are defined for the operation and must appear in this order:
1108      - present (elements present in the sparse tensor)
1109      - absent (elements not present in the sparse tensor)
1110
1111      Each region contains a single block describing the computation and result.
1112      A non-empty block must end with a sparse_tensor.yield and the return type
1113      must match the type of `output`. The primary region's block has one
1114      argument, while the absent region's block has zero arguments. The
1115      absent region may only generate constants or values already computed
1116      on entry of the `linalg.generic` operation.
1117
1118      A region may also be declared empty (i.e. `absent={}`), indicating that the
1119      region does not contribute to the output.
1120
1121      Due to the possibility of empty regions, i.e. lack of a value for certain
1122      cases, the result of this operation may only feed directly into the output
1123      of the `linalg.generic` operation or into a custom reduction
1124      `sparse_tensor.reduce` operation that follows in the same region.
1125
1126      Example of A+1, restricted to existing elements:
1127
1128      ```mlir
1129      %C = tensor.empty(...) : tensor<?xf64, #SparseVector>
1130      %0 = linalg.generic #trait
1131         ins(%A: tensor<?xf64, #SparseVector>)
1132        outs(%C: tensor<?xf64, #SparseVector>) {
1133        ^bb0(%a: f64, %c: f64) :
1134          %result = sparse_tensor.unary %a : f64 to f64
1135            present={
1136            ^bb0(%arg0: f64):
1137              %cf1 = arith.constant 1.0 : f64
1138              %ret = arith.addf %arg0, %cf1 : f64
1139              sparse_tensor.yield %ret : f64
1140            }
1141            absent={}
1142          linalg.yield %result : f64
1143      } -> tensor<?xf64, #SparseVector>
1144      ```
1145
1146      Example returning +1 for existing values and -1 for missing values:
1147
1148      ```mlir
1149      %p1 = arith.constant  1 : i32
1150      %m1 = arith.constant -1 : i32
1151      %C = tensor.empty(...) : tensor<?xi32, #SparseVector>
1152      %1 = linalg.generic #trait
1153         ins(%A: tensor<?xf64, #SparseVector>)
1154        outs(%C: tensor<?xi32, #SparseVector>) {
1155        ^bb0(%a: f64, %c: i32) :
1156          %result = sparse_tensor.unary %a : f64 to i32
1157            present={
1158            ^bb0(%x: f64):
1159              sparse_tensor.yield %p1 : i32
1160            }
1161            absent={
1162              sparse_tensor.yield %m1 : i32
1163            }
1164          linalg.yield %result : i32
1165      } -> tensor<?xi32, #SparseVector>
1166      ```
1167
1168      Example showing a structural inversion (existing values become missing in
1169      the output, while missing values are filled with 1):
1170
1171      ```mlir
1172      %c1 = arith.constant 1 : i64
1173      %C = tensor.empty(...) : tensor<?xi64, #SparseVector>
1174      %2 = linalg.generic #trait
1175         ins(%A: tensor<?xf64, #SparseVector>)
1176        outs(%C: tensor<?xi64, #SparseVector>) {
1177        ^bb0(%a: f64, %c: i64) :
1178          %result = sparse_tensor.unary %a : f64 to i64
1179            present={}
1180            absent={
1181              sparse_tensor.yield %c1 : i64
1182            }
1183          linalg.yield %result : i64
1184      } -> tensor<?xi64, #SparseVector>
1185      ```
1186  }];
1187
1188  let regions = (region AnyRegion:$presentRegion, AnyRegion:$absentRegion);
1189  let assemblyFormat = [{
1190        $x attr-dict `:` type($x) `to` type($output) `\n`
1191        `present` `=` $presentRegion `\n`
1192        `absent` `=` $absentRegion
1193  }];
1194  let hasVerifier = 1;
1195}
1196
1197def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResultType]> {
1198  let summary = "Custom reduction operation utilized within linalg.generic";
1199  let description = [{
1200      Defines a computation with a `linalg.generic` operation that takes two
1201      operands and an identity value and reduces all stored values down to a
1202      single result based on the computation in the region.
1203
1204      The region must contain exactly one block taking two arguments. The block
1205      must end with a sparse_tensor.yield and the output must match the input
1206      argument types.
1207
1208      Note that this operation is only required for custom reductions beyond
1209      the standard reduction operations (add, sub, or, xor) that can be
1210      sparsified by merely reducing the stored values. More elaborate reduction
1211      operations (mul, and, min, max, etc.) would need to account for implicit
1212      zeros as well. They can still be handled using this custom reduction
1213      operation. The `linalg.generic` `iterator_types` defines which indices
1214      are being reduced. When the associated operands are used in an operation,
1215      a reduction will occur. The use of this explicit `reduce` operation
1216      is not required in most cases.
1217
1218      Example of Matrix->Vector reduction using max(product(x_i), 100):
1219
1220      ```mlir
1221      %cf1 = arith.constant 1.0 : f64
1222      %cf100 = arith.constant 100.0 : f64
1223      %C = tensor.empty(...)
1224      %0 = linalg.generic #trait
1225         ins(%A: tensor<?x?xf64, #SparseMatrix>)
1226        outs(%C: tensor<?xf64, #SparseVector>) {
1227        ^bb0(%a: f64, %c: f64) :
1228          %result = sparse_tensor.reduce %c, %a, %cf1 : f64 {
1229              ^bb0(%arg0: f64, %arg1: f64):
1230                %0 = arith.mulf %arg0, %arg1 : f64
1231                %cmp = arith.cmpf "ogt", %0, %cf100 : f64
1232                %ret = arith.select %cmp, %cf100, %0 : f64
1233                sparse_tensor.yield %ret : f64
1234            }
1235          linalg.yield %result : f64
1236      } -> tensor<?xf64, #SparseVector>
1237      ```
1238  }];
1239
1240  let regions = (region SizedRegion<1>:$region);
1241  let arguments = (ins AnyType:$x, AnyType:$y, AnyType:$identity);
1242  let results = (outs AnyType:$output);
1243  let assemblyFormat = "$x `,` $y `,` $identity attr-dict `:` type($output) $region";
1244
1245  let hasVerifier = 1;
1246}
1247
1248def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResultType]> {
1249  let summary = "Select operation utilized within linalg.generic";
1250  let description = [{
1251      Defines an evaluation within a `linalg.generic` operation that takes a single
1252      operand and decides whether or not to keep that operand in the output.
1253
1254      A single region must contain exactly one block taking one argument. The block
1255      must end with a sparse_tensor.yield and the output type must be boolean.
1256
1257      Value threshold is an obvious usage of the select operation. However, by using
1258      `linalg.index`, other useful selections can be achieved, such as selecting the
1259      upper triangle of a matrix.
1260
1261      Example of selecting A >= 4.0:
1262
1263      ```mlir
1264      %C = tensor.empty(...)
1265      %0 = linalg.generic #trait
1266         ins(%A: tensor<?xf64, #SparseVector>)
1267        outs(%C: tensor<?xf64, #SparseVector>) {
1268        ^bb0(%a: f64, %c: f64) :
1269          %result = sparse_tensor.select %a : f64 {
1270              ^bb0(%arg0: f64):
1271                %cf4 = arith.constant 4.0 : f64
1272                %keep = arith.cmpf "uge", %arg0, %cf4 : f64
1273                sparse_tensor.yield %keep : i1
1274            }
1275          linalg.yield %result : f64
1276      } -> tensor<?xf64, #SparseVector>
1277      ```
1278
1279      Example of selecting lower triangle of a matrix:
1280
1281      ```mlir
1282      %C = tensor.empty(...)
1283      %1 = linalg.generic #trait
1284         ins(%A: tensor<?x?xf64, #CSR>)
1285        outs(%C: tensor<?x?xf64, #CSR>) {
1286        ^bb0(%a: f64, %c: f64) :
1287          %row = linalg.index 0 : index
1288          %col = linalg.index 1 : index
1289          %result = sparse_tensor.select %a : f64 {
1290              ^bb0(%arg0: f64):
1291                %keep = arith.cmpi "ult", %col, %row : index
1292                sparse_tensor.yield %keep : i1
1293            }
1294          linalg.yield %result : f64
1295      } -> tensor<?x?xf64, #CSR>
1296      ```
1297  }];
1298
1299  let regions = (region SizedRegion<1>:$region);
1300  let arguments = (ins AnyType:$x);
1301  let results = (outs AnyType:$output);
1302  let assemblyFormat = "$x attr-dict `:` type($x) $region";
1303
1304  let hasVerifier = 1;
1305}
1306
1307def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator,
1308    ParentOneOf<["BinaryOp", "UnaryOp", "ReduceOp", "SelectOp",
1309                 "ForeachOp", "IterateOp", "CoIterateOp"]>]> {
1310  let summary = "Yield from sparse_tensor set-like operations";
1311  let description = [{
1312      Yields a value from within a `binary`, `unary`, `reduce`, `select`,
1313      `foreach`, `iterate`, or `coiterate` block.
1314
1315      Example:
1316
1317      ```mlir
1318      %0 = sparse_tensor.unary %a : i64 to i64 {
1319        present={
1320          ^bb0(%arg0: i64):
1321            %cst = arith.constant 1 : i64
1322            %ret = arith.addi %arg0, %cst : i64
1323            sparse_tensor.yield %ret : i64
1324        }
1325      }
1326      ```
1327  }];
1328
1329  let builders = [
1330    OpBuilder<(ins),
1331    [{
1332      build($_builder, $_state, ValueRange());
1333    }]>,
1334    OpBuilder<(ins "Value":$yieldVal),
1335    [{
1336      build($_builder, $_state, ValueRange(yieldVal));
1337    }]>
1338  ];
1339
1340  let extraClassDeclaration = [{
1341     Value getSingleResult() {
1342        assert(hasSingleResult());
1343        return getResults().front();
1344     }
1345     bool hasSingleResult() {
1346        return getResults().size() == 1;
1347     }
1348  }];
1349
1350  let arguments = (ins Variadic<AnyType>:$results);
1351  let assemblyFormat = "$results attr-dict `:` type($results)";
1352}
1353
1354def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
1355    [SingleBlockImplicitTerminator<"YieldOp">]> {
1356  let summary = "Iterates over elements in a tensor";
1357  let description = [{
1358     Iterates over stored elements in a tensor (which are typically, but not always,
1359     non-zero for sparse tensors) and executes the block.
1360
1361     `tensor`: the input tensor to iterate over.
1362     `initArgs`: the initial values of the loop-carried variables, carried and updated during each iteration.
1363     `order`: an optional permutation affine map that specifies the order in which
1364     the dimensions are visited (e.g., row first or column first). This is only
1365     applicable when the input tensor is a non-annotated dense tensor.
1366
1367     For an input tensor with dim-rank `n`, the block must take `n + 1`
1368     arguments (plus additional loop-carried variables as described below).
1369     The first `n` arguments provide the dimension-coordinates of the element
1370     being visited, and must all have `index` type.  The `(n+1)`-th argument
1371     provides the element's value, and must have the tensor's element type.
1372
1373     `sparse_tensor.foreach` can also operate on loop-carried variables and returns
1374     the final values after loop termination. The initial values of the variables are
1375     passed as additional SSA operands to the "sparse_tensor.foreach"; the corresponding
1376     block arguments follow the n + 1 block arguments mentioned above (n coordinates and 1 value).
1377
1378     The region must terminate with a "sparse_tensor.yield" that passes the current
1379     values of all loop-carried variables to the next iteration, or to the
1380     result, if at the last iteration. The number and static types of loop-carried
1381     variables may not change with iterations.
1382
1383     For example:
1384     ```mlir
1385     %c0 = arith.constant 0 : i32
1386     %ret = sparse_tensor.foreach in %0 init(%c0): tensor<?x?xi32, #DCSR>, i32 -> i32 do {
1387      ^bb0(%arg1: index, %arg2: index, %arg3: i32, %iter: i32):
1388        %sum = arith.addi %iter, %arg3 : i32
1389        sparse_tensor.yield %sum : i32
1390     }
1391     ```
1392
1393     It is important to note that the generated loop iterates over
1394     elements in their storage order.  However, regardless of the
1395     storage scheme used by the tensor, the block is always given
1396     the dimension-coordinates.
1397
1398     For example:
1399     ```mlir
1400     #COL_MAJOR = #sparse_tensor.encoding<{
1401       map = (d0, d1) -> (d1 : compressed, d0 : compressed)
1402     }>
1403
1404     // foreach on a column-major sparse tensor
1405     sparse_tensor.foreach in %0 : tensor<2x3xf64, #COL_MAJOR> do {
1406      ^bb0(%row: index, %col: index, %arg3: f64):
1407         // [%row, %col] -> [0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]
1408     }
1409
1410     #ROW_MAJOR = #sparse_tensor.encoding<{
1411       map = (d0, d1) -> (d0 : compressed, d1 : compressed)
1412     }>
1413
1414     // foreach on a row-major sparse tensor
1415     sparse_tensor.foreach in %0 : tensor<2x3xf64, #ROW_MAJOR> do {
1416      ^bb0(%row: index, %col: index, %arg3: f64):
1417         // [%row, %col] -> [0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]
1418     }
1419
1420     // foreach on a row-major dense tensor but visit column first
1421     sparse_tensor.foreach in %0 {order=affine_map<(i,j)->(j,i)>}: tensor<2x3xf64> do {
1422      ^bb0(%row: index, %col: index, %arg3: f64):
1423         // [%row, %col] -> [0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]
1424     }
1425
1426     ```
1427  }];
1428
1429  let builders = [
1430    OpBuilder<(ins "Value":$tensor, "ValueRange":$iterArgs, "AffineMapAttr":$order,
1431      "function_ref<void(OpBuilder &, Location, ValueRange, Value, ValueRange)>")>,
1432    OpBuilder<(ins "Value":$tensor, "AffineMapAttr":$order,
1433      "function_ref<void(OpBuilder &, Location, ValueRange, Value, ValueRange)>":$bodyBuilder),
1434    [{
1435      build($_builder, $_state, tensor, ValueRange(), order, bodyBuilder);
1436    }]>,
1437    OpBuilder<(ins "Value":$tensor,
1438      "function_ref<void(OpBuilder &, Location, ValueRange, Value, ValueRange)>":$bodyBuilder),
1439    [{
1440      build($_builder, $_state, tensor, ValueRange(), nullptr, bodyBuilder);
1441    }]>,
1442    OpBuilder<(ins "Value":$tensor, "ValueRange":$iterArgs,
1443      "function_ref<void(OpBuilder &, Location, ValueRange, Value, ValueRange)>":$bodyBuilder),
1444    [{
1445      build($_builder, $_state, tensor, iterArgs, nullptr, bodyBuilder);
1446    }]>
1447  ];
1448
1449  let regions = (region SizedRegion<1>:$region);
1450  let arguments = (ins AnyRankedTensor:$tensor,
1451                       Variadic<AnyType>:$initArgs,
1452                       OptionalAttr<AffineMapAttr>:$order);
1453  let results = (outs Variadic<AnyType>:$results);
1454  let assemblyFormat = "`in` $tensor (`init``(`$initArgs^`)`)? attr-dict"
1455                       "    `:` type($tensor) (`,` type($initArgs)^)?"
1456                       "  (`->` type($results)^)?  `do` $region";
1457  let hasVerifier = 1;
1458}
1459
1460//===----------------------------------------------------------------------===//
1461// Sparse Tensor Iteration Operations.
1462//===----------------------------------------------------------------------===//
1463
1464def ExtractIterSpaceOp : SparseTensor_Op<"extract_iteration_space",
1465    [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
1466  let summary = "Extracts an iteration space from a sparse tensor between certain levels";
1467  let description = [{
1468      Extracts a `!sparse_tensor.iter_space` from a sparse tensor between
1469      certain (consecutive) levels. For sparse levels, this is usually done by
1470      loading a position range from the underlying sparse tensor storage.
1471      E.g., for a compressed level, the iteration space is extracted as
1472      `[pos[i], pos[i+1])`, supposing the parent iterator points at `i`.
1473
1474      `tensor`: the input sparse tensor that defines the iteration space.
1475      `parentIter`: the iterator for the previous level, from which the iteration space
1476      for the current levels will be extracted.
1477      `loLvl`, `hiLvl`: the level range between [loLvl, hiLvl) in the input tensor that
1478      the returned iteration space covers. `hiLvl - loLvl` defines the dimension of the
1479      iteration space.
1480
1481      The type of the returned value must be
1482      `!sparse_tensor.iter_space<#INPUT_ENCODING, lvls = $loLvl to $hiLvl>`.
1483      The returned iteration space can then be iterated over by
1484      `sparse_tensor.iterate` operations to visit every stored element
1485      (usually nonzeros) in the input sparse tensor.
1486
1487      Example:
1488      ```mlir
1489      // Extracts a 1-D iteration space from a COO tensor at level 1.
1490      %space = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 1
1491        : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0>
1492        -> !sparse_tensor.iter_space<#COO, lvls = 1>
1493      ```
1494  }];
1495
1496  let arguments = (ins AnySparseTensor:$tensor,
1497                       Optional<AnySparseIterator>:$parentIter,
1498                       LevelAttr:$loLvl, LevelAttr:$hiLvl);
1499  let results = (outs AnySparseIterSpace:$extractedSpace);
1500
1501  let extraClassDeclaration = [{
1502    std::pair<Level, Level> getLvlRange() {
1503      return std::make_pair(getLoLvl(), getHiLvl());
1504    }
1505    unsigned getSpaceDim() {
1506      return getHiLvl() - getLoLvl();
1507    }
1508    ArrayRef<::mlir::sparse_tensor::LevelType> getSpaceLvlTypes() {
1509      return getExtractedSpace().getType().getLvlTypes();
1510    }
1511  }];
1512
1513  let builders = [
1514    // Construct a 1-D iteration space.
1515    OpBuilder<(ins "Value":$tensor, "Value":$parentIter,
1516                   "sparse_tensor::Level":$loLvl),
1517    [{
1518      build($_builder, $_state, tensor, parentIter, loLvl, loLvl + 1);
1519    }]>,
1520    // Construct a 1-D root iteration space
1521    OpBuilder<(ins "Value":$tensor),
1522    [{
1523      build($_builder, $_state, tensor, nullptr, 0);
1524    }]>
1525  ];
1526
1527  let assemblyFormat = "$tensor (`at` $parentIter^)? `lvls` `=` custom<LevelRange>($loLvl, $hiLvl) "
1528                       " attr-dict `:` type($tensor) (`,` type($parentIter)^)? "
1529                       "`->` qualified(type($extractedSpace))";
1530
1531  let hasVerifier = 1;
1532}
1533
1534def ExtractValOp : SparseTensor_Op<"extract_value", [
1535    Pure,
1536    TypesMatchWith<"result type matches element type of tensor",
1537                   "tensor", "result",
1538                   "::llvm::cast<TensorType>($_self).getElementType()">]> {
1539  let summary = "Extracts a value from a sparse tensor using an iterator.";
1540  let description = [{
1541      The `sparse_tensor.extract_value` operation extracts the value
1542      pointed to by a sparse iterator from a sparse tensor.
1543
1544      Example:
1545
1546      ```mlir
1547      %val = sparse_tensor.extract_value %sp at %it
1548           : tensor<?x?xf32, #CSR>, !sparse_tensor.iterator<#CSR, lvl = 1>
1549      ```
1550  }];
1551
1552  let arguments = (ins AnySparseTensor:$tensor, AnySparseIterator:$iterator);
1553  let results = (outs AnyType:$result);
1554
1555  let assemblyFormat = "$tensor `at` $iterator attr-dict `:` type($tensor)`,` qualified(type($iterator))";
1556  let hasVerifier = 1;
1557}
1558
1559def IterateOp : SparseTensor_Op<"iterate",
1560    [RecursiveMemoryEffects, RecursivelySpeculatable,
1561     DeclareOpInterfaceMethods<LoopLikeOpInterface,
1562      ["getInitsMutable", "getLoopResults", "getRegionIterArgs",
1563       "getYieldedValuesMutable"]>,
1564     DeclareOpInterfaceMethods<RegionBranchOpInterface,
1565      ["getEntrySuccessorOperands"]>,
1566     SingleBlockImplicitTerminator<"sparse_tensor::YieldOp">]> {
1567
1568  let summary = "Iterates over a sparse iteration space";
1569  let description = [{
1570      The `sparse_tensor.iterate` operation represents a loop (nest) over
1571      the provided iteration space extracted from a specific sparse tensor.
1572      The operation defines an SSA value for a sparse iterator that points
1573      to the current stored element in the sparse tensor and SSA values
1574      for coordinates of the stored element. The coordinates are always
1575      converted to `index` type regardless of the underlying sparse tensor
1576      storage. When coordinates are not used, the SSA values can be replaced
1577      by `_` symbols, which usually leads to simpler generated code after
1578      sparsification. For example:
1579
1580      ```mlir
1581      // The coordinate for level 0 is not used when iterating over a 2-D
1582      // iteration space.
1583      sparse_tensor.iterate %iterator in %space at(_, %crd_1)
1584        : !sparse_tensor.iter_space<#CSR, lvls = 0 to 2>
1585      ```
1586
1587      `sparse_tensor.iterate` can also operate on loop-carried variables.
1588      It returns the final values after loop termination.
1589      The initial values of the variables are passed via the `iter_args` binding,
1590      in addition to the iterator SSA value and the used coordinate SSA values
1591      mentioned above. The operation region has an argument for the iterator,
1592      variadic arguments for the specified (used) coordinates, followed by one
1593      argument for each loop-carried variable, representing the value of the variable
1594      at the current iteration.
1595      The body region must contain exactly one block that terminates with
1596      `sparse_tensor.yield`.
1597
1598      The results of a `sparse_tensor.iterate` hold the final values after
1599      the last iteration. If the `sparse_tensor.iterate` defines any values,
1600      a yield must be explicitly present.
1601      The number and types of the `sparse_tensor.iterate` results must match
1602      the initial values in the iter_args binding and the yield operands.
1603
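      For instance, a minimal sketch of a loop with a single loop-carried
      variable, summing the values stored in a sparse vector (assuming a 1-D
      encoding `#SV`, a tensor `%sp`, an iteration space `%space` extracted
      from it, and an `f32` initial value `%init` are defined elsewhere):

      ```mlir
      %sum = sparse_tensor.iterate %it in %space at(%crd) iter_args(%acc = %init)
           : !sparse_tensor.iter_space<#SV, lvls = 0> -> f32 {
        %val = sparse_tensor.extract_value %sp at %it
             : tensor<?xf32, #SV>, !sparse_tensor.iterator<#SV, lvls = 0>
        %next = arith.addf %acc, %val : f32
        sparse_tensor.yield %next : f32
      }
      ```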
1604
1605      A nested `sparse_tensor.iterate` example that prints all the coordinates
1606      stored in the sparse input:
1607
1608      ```mlir
1609      func.func @nested_iterate(%sp : tensor<4x8xf32, #COO>) {
1610        // Iterates over the first level of %sp
1611        %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0
1612            : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0 to 1>
1613        sparse_tensor.iterate %it1 in %l1 at (%coord0)
1614            : !sparse_tensor.iter_space<#COO, lvls = 0 to 1>  {
1615          // Iterates over the second level of %sp
1616          %l2 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 1
1617              : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0 to 1>
1618             -> !sparse_tensor.iter_space<#COO, lvls = 1 to 2>
1619          sparse_tensor.iterate %it2 in %l2 at (%coord1)
1620              : !sparse_tensor.iter_space<#COO, lvls = 1 to 2>  {
1621             vector.print %coord0 : index
1622             vector.print %coord1 : index
1623          }
1624        }
1625      }
1626
1627      ```
1628  }];
1629
1630  let arguments = (ins AnySparseIterSpace:$iterSpace,
1631                       Variadic<AnyType>:$initArgs,
1632                       I64BitSetAttr:$crdUsedLvls);
1633  let results = (outs Variadic<AnyType>:$results);
1634  let regions = (region SizedRegion<1>:$region);
1635
1636  let skipDefaultBuilders = 1;
1637  let builders = [
1638    OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs)>,
1639    OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs, "I64BitSet" :$crdUsedLvls)>
1640  ];
1641
1642  let extraClassDeclaration = [{
1643    unsigned getSpaceDim() {
1644      return getIterSpace().getType().getSpaceDim();
1645    }
1646    BlockArgument getIterator() {
1647      return getRegion().getArguments().back();
1648    }
1649    std::optional<BlockArgument> getLvlCrd(Level lvl) {
1650      if (getCrdUsedLvls()[lvl]) {
        // The coordinate for `lvl` is stored at the index equal to the
        // number of *used* levels below `lvl` in the bit set.
1651        uint64_t mask = (static_cast<uint64_t>(0x01u) << lvl) - 1;
1652        return getCrds()[llvm::popcount(mask & getCrdUsedLvls())];
1653      }
1654      return std::nullopt;
1655    }
1656    Block::BlockArgListType getCrds() {
1657      // User-provided iteration arguments -> coords -> iterator.
1658      return getRegion().getArguments().slice(getNumRegionIterArgs(), getCrdUsedLvls().count());
1659    }
1660    unsigned getNumRegionIterArgs() {
1661      return getRegion().getArguments().size() - 1 - getCrdUsedLvls().count();
1662    }
1663  }];
1664
1665  let hasVerifier = 1;
1666  let hasRegionVerifier = 1;
1667  let hasCanonicalizer = 1;
1668  let hasCustomAssemblyFormat = 1;
1669}
1670
1671def SparseTensor_CoIterateOp : SparseTensor_Op<"coiterate",
1672    [AttrSizedOperandSegments,
1673     SingleBlockImplicitTerminator<"sparse_tensor::YieldOp">,
1674     RecursiveMemoryEffects]> {
1675  let summary = "Co-iterates over a set of sparse iteration spaces";
1676  let description = [{
1677      The `sparse_tensor.coiterate` operation represents a loop (nest) over
1678      a set of iteration spaces. The operation can have multiple regions,
1679      with each of them defining a case to compute a result at the current iterations.
1680      The case condition is defined solely based on the pattern of specified iterators.
1681      For example:
1682      ```mlir
1683      %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1684           : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1685              !sparse_tensor.iter_space<#COO, lvls = 0>)
1686           -> index
1687      case %it1, _ {
1688        // %coord is specified in space %sp1 but *NOT* specified in space %sp2.
1689      }
1690      case %it1, %it2 {
1691        // %coord is specified in *BOTH* spaces %sp1 and %sp2.
1692      }
1693      ```
1694
1695      `sparse_tensor.coiterate` can also operate on loop-carried variables.
1696      It returns the final value for each loop-carried variable after loop termination.
1697      The initial values of the variables are passed via the `iter_args` binding,
1698      in addition to the iteration spaces and the used coordinate SSA values.
1699      Each region of the operation has variadic arguments for the specified (used)
1700      coordinates, one argument for each loop-carried variable (representing the value
1701      of the variable at the current iteration), followed by a list of arguments for the iterators.
1702      The body region must contain exactly one block that terminates with
1703      `sparse_tensor.yield`.
1704
1705      The results of a `sparse_tensor.coiterate` hold the final values after
1706      the last iteration. If the `sparse_tensor.coiterate` defines any values,
1707      a yield must be explicitly present in every region defined in the operation.
1708      The number and types of the `sparse_tensor.coiterate` results must match
1709      the initial values in the iter_args binding and the yield operands.
1710
1711
1712      A `sparse_tensor.coiterate` example that does elementwise addition between two
1713      sparse vectors.
1714
1715
1716      ```mlir
1717      %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1718           : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1719              !sparse_tensor.iter_space<#CSR, lvls = 0>)
1720           -> tensor<?xindex, #CSR>
1721      case %it1, _ {
1722         // v = v1 + 0 = v1
1723         %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1724         %yield = sparse_tensor.insert %v1 into %arg[%coord]
1725         sparse_tensor.yield %yield
1726      }
1727      case _, %it2 {
1728         // v = v2 + 0 = v2
1729         %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1730         %yield = sparse_tensor.insert %v2 into %arg[%coord]
1731         sparse_tensor.yield %yield
1732      }
1733      case %it1, %it2 {
1734         // v = v1 + v2
1735         %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1736         %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1737         %v = arith.addi %v1, %v2 : index
1738         %yield = sparse_tensor.insert %v into %arg[%coord]
1739         sparse_tensor.yield %yield
1740      }
1741      ```
1742  }];
1743
1744  let arguments = (ins Variadic<AnySparseIterSpace>:$iterSpaces,
1745                       Variadic<AnyType>:$initArgs,
1746                       I64BitSetAttr:$crdUsedLvls,
1747                       I64BitSetArrayAttr:$cases);
1748  let results = (outs Variadic<AnyType>:$results);
1749  let regions = (region VariadicRegion<SizedRegion<1>>:$caseRegions);
1750
1751  let builders = [
1752    OpBuilder<(ins "ValueRange":$iterSpace, "ValueRange":$initArgs, "unsigned":$numCases)>,
1753  ];
1754
1755  let extraClassDeclaration = [{
1756    unsigned getSpaceDim() {
1757      return llvm::cast<::mlir::sparse_tensor::IterSpaceType>(
1758                 getIterSpaces().front().getType())
1759          .getSpaceDim();
1760    }
1761    I64BitSet getRegionDefinedSpace(unsigned regionIdx) {
1762      return I64BitSet(llvm::cast<IntegerAttr>(getCases()[regionIdx])
1763                           .getValue().getZExtValue());
1764    }
1765    auto getRegionDefinedSpaces() {
1766      return llvm::map_range(getCases().getValue(), [](Attribute attr) {
1767        return I64BitSet(llvm::cast<IntegerAttr>(attr).getValue().getZExtValue());
1768      });
1769    }
1770
1771    // The block arguments start with the user-provided iteration arguments,
1772    // followed by the referenced coordinates, and end with the iterators.
1773    Block::BlockArgListType getCrds(unsigned regionIdx) {
1774      return getRegion(regionIdx).getArguments()
1775          .slice(getNumRegionIterArgs(), getCrdUsedLvls().count());
1776    }
1777    unsigned getNumRegionIterArgs() {
1778      return getInitArgs().size();
1779    }
1780    Block::BlockArgListType getRegionIterArgs(unsigned regionIdx) {
1781      return getRegion(regionIdx).getArguments()
1782          .take_front(getNumRegionIterArgs());
1783    }
1784    Block::BlockArgListType getRegionIterators(unsigned regionIdx) {
1785      return getRegion(regionIdx).getArguments()
1786          .take_back(getRegionDefinedSpace(regionIdx).count());
1787    }
1788    ValueRange getYieldedValues(unsigned regionIdx);
1789
1790    // Returns a vector of regions that are the `sub-cases` of the given case region.
1791    // E.g., `case %it1, _, %it3` is a subcase of `case %it1, %it2, %it3`.
1792    SmallVector<Region *> getSubCasesOf(unsigned regionIdx);
1793  }];
1794
1795  let hasVerifier = 1;
1796  let hasRegionVerifier = 1;
1797  let hasCustomAssemblyFormat = 1;
1798}
1799
1800//===----------------------------------------------------------------------===//
1801// Sparse Tensor Debugging and Test-Only Operations.
1802//===----------------------------------------------------------------------===//
1803
1804def SparseTensor_PrintOp : SparseTensor_Op<"print"> {
1805  string summary = "Prints a sparse tensor (for testing and debugging)";
1806  string description = [{
1807    Prints the individual components of a sparse tensor (the positions,
1808    coordinates, and values) to stdout for testing and debugging
1809    purposes. This operation lowers to just a few primitives in a light-weight
1810    runtime support library, which simplifies supporting this operation on new platforms.
1811
1812    Example:
1813
1814    ```mlir
1815    sparse_tensor.print %tensor : tensor<1024x1024xf64, #CSR>
1816    ```
1817  }];
1818
1819  let arguments = (ins AnySparseTensor:$tensor);
1820  let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
1821}
1822
1823def SparseTensor_HasRuntimeLibraryOp
1824    : SparseTensor_Op<"has_runtime_library", []>, Results<(outs I1:$result)> {
1825  string summary = "Indicates whether running in runtime/codegen mode";
1826  string description = [{
1827    Returns a boolean value that indicates whether the sparsifier runs in
1828    runtime library mode or not. For testing only! This operation is useful
1829    for writing test cases that require different code depending on
1830    runtime/codegen mode.
1831
1832    Example:
1833
1834    ```mlir
1835    %has_runtime = sparse_tensor.has_runtime_library
1836    scf.if %has_runtime {
1837      ...
1838    }
1839    ```
1840  }];
1841  let assemblyFormat = "attr-dict";
1842}
1843
1844#endif // SPARSETENSOR_OPS
1845