xref: /llvm-project/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td (revision 77f8297c6fdaa62121ddb108043dcaad5c45c7ad)
1//===-- SparseTensorAttrDefs.td - attributes definitions ---*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SPARSETENSOR_ATTRDEFS
10#define SPARSETENSOR_ATTRDEFS
11
12include "mlir/IR/AttrTypeBase.td"
13include "mlir/IR/EnumAttr.td"
14include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
15include "mlir/IR/TensorEncoding.td"
16
// All of the sparse tensor attributes will extend this class, which
// anchors them to the sparse_tensor dialect.
class SparseTensor_Attr<string name,
                        list<Trait> traits = []>
    : AttrDef<SparseTensor_Dialect, name, traits>;
21
22//===----------------------------------------------------------------------===//
23// A simple bitset attribute wrapped around a single int64_t to encode a set of
24// sparse tensor levels.
25//===----------------------------------------------------------------------===//
26
// A 64-bit integer attribute interpreted as a bitset over sparse tensor
// levels. The predicate restricts the attribute to IntegerAttr of i64 type.
def I64BitSetAttr : TypedAttrBase<I64, "IntegerAttr",
      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
           CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getType().isInteger(64)">]>,
      "LevelSet attribute"> {
  // Surface the stored 64-bit integer to C++ users as an I64BitSet wrapper
  // (zero-extended from the underlying APInt).
  let returnType = [{::mlir::sparse_tensor::I64BitSet}];
  let convertFromStorage = [{::mlir::sparse_tensor::I64BitSet($_self.getValue().getZExtValue())}];
}
34
// An array attribute whose elements are all I64BitSetAttr.
def I64BitSetArrayAttr :
    TypedArrayAttrBase<I64BitSetAttr, "I64BitSet array attribute">;
37
38//===----------------------------------------------------------------------===//
39// These attributes are just like `IndexAttr` except that they clarify whether
40// the index refers to a dimension (an axis of the semantic tensor) or a level
41// (an axis of the actual storage format).
42//===----------------------------------------------------------------------===//
43
// An index-typed IntegerAttr whose value denotes a dimension
// (an axis of the semantic tensor).
def DimensionAttr :
    TypedAttrBase<
      Index, "IntegerAttr",
      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
           CPred<"::llvm::isa<::mlir::IndexType>("
                     "::llvm::cast<::mlir::IntegerAttr>($_self).getType())">]>,
      "dimension attribute"> {
  // Surface the stored value to C++ as a Dimension (zero-extended).
  let returnType = [{::mlir::sparse_tensor::Dimension}];
  let convertFromStorage = [{$_self.getValue().getZExtValue()}];
}
54
// An index-typed IntegerAttr whose value denotes a level
// (an axis of the actual storage format).
def LevelAttr :
    TypedAttrBase<
      Index, "IntegerAttr",
      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
           CPred<"::llvm::isa<::mlir::IndexType>("
                     "::llvm::cast<::mlir::IntegerAttr>($_self).getType())">]>,
      "level attribute"> {
  // Surface the stored value to C++ as a Level (zero-extended).
  let returnType = [{::mlir::sparse_tensor::Level}];
  let convertFromStorage = [{$_self.getValue().getZExtValue()}];
}
65
66//===----------------------------------------------------------------------===//
67// Sparse Tensor Dimension Slice Attribute.
68//===----------------------------------------------------------------------===//
69
def SparseTensorDimSliceAttr : SparseTensor_Attr<"SparseTensorDimSlice", []> {
  let mnemonic = "slice";

  let description = [{
    An attribute to encode slice information of a sparse tensor on a particular
    dimension (a tuple of offset, size, stride).
  }];

  // Each component may be the sentinel `kDynamic` (see below) to denote a
  // dynamic (unknown at compile time) offset/size/stride.
  let parameters = (
    ins
    "int64_t" : $offset,
    "int64_t" : $size,
    "int64_t" : $stride
  );

  let builders = [
    // The nop slice (i.e., that includes everything):
    // offset 0, dynamic size, stride 1.
    AttrBuilder<(ins), [{ return $_get($_ctxt, 0, kDynamic, 1); }]>
  ];

  let extraClassDeclaration = [{
    void print(llvm::raw_ostream &os) const;

    /// Special value for dynamic offset/size/stride.
    static constexpr int64_t kDynamic = -1;
    static constexpr bool isDynamic(int64_t v) { return v == kDynamic; }

    /// Returns the static value, or std::nullopt when `v` is dynamic.
    static std::optional<uint64_t> getStatic(int64_t v);
    /// Returns a printable form of the value (dynamic values print as "?").
    static std::string getStaticString(int64_t v);

    /// Per-component variants of getStatic (std::nullopt when dynamic).
    std::optional<uint64_t> getStaticOffset() const;
    std::optional<uint64_t> getStaticStride() const;
    std::optional<uint64_t> getStaticSize() const;
    /// Returns true if offset, size, and stride are all dynamic.
    bool isCompletelyDynamic() const;
  }];

  let genVerifyDecl = 1;
  let hasCustomAssemblyFormat = 1;
}
108
109//===----------------------------------------------------------------------===//
110// Sparse Tensor Type Encoding Attribute.
111//===----------------------------------------------------------------------===//
112
// Sparse tensor encoding attribute.
def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
         [ DeclareAttrInterfaceMethods<VerifiableTensorEncoding> ] > {
  let mnemonic = "encoding";

  let description = [{
    An attribute to encode information on sparsity properties of tensors, inspired
    by the TACO formalization of sparse tensors. This encoding is eventually used
    by a **sparsifier** pass to generate sparse code fully automatically from a
    sparsity-agnostic representation of the computation, i.e., an implicit sparse
    representation is converted to an explicit sparse representation where co-iterating
    loops operate on sparse storage formats rather than tensors with a sparsity
    encoding. Compiler passes that run before this sparsifier pass need to be aware
    of the semantics of tensor types with such a sparsity encoding.

    In this encoding, we use **dimension** to refer to the axes of the semantic tensor,
    and **level** to refer to the axes of the actual storage format, i.e., the
    operational representation of the sparse tensor in memory. The number of
    dimensions is usually the same as the number of levels (such as CSR storage format).
    However, the encoding can also map dimensions to higher-order levels (for example,
    to encode a block-sparse BSR storage format) or to lower-order levels
    (for example, to linearize dimensions as a single level in the storage).

    The encoding contains a map that provides the following:

    - An ordered sequence of dimension specifications, each of which defines:
      - the dimension-size (implicit from the tensor’s dimension-shape)
      - a **dimension-expression**
    - An ordered sequence of level specifications, each of which includes a required
      **level-type**, which defines how the level should be stored. Each level-type
      consists of:
      - a **level-expression**, which defines what is stored
      - a **level-format**
      - a collection of **level-properties** that apply to the level-format

    Each level-expression is an affine expression over dimension-variables. Thus, the
    level-expressions collectively define an affine map from dimension-coordinates to
    level-coordinates. The dimension-expressions collectively define the inverse map,
    which only needs to be provided for elaborate cases where it cannot be inferred
    automatically.

    Each dimension could also have an optional `SparseTensorDimSliceAttr`.
    Within the sparse storage format, we refer to indices that are stored explicitly
    as **coordinates** and offsets into the storage format as **positions**.

    The supported level-formats are the following:

    - **dense** : all entries along this level are stored and linearized.
    - **batch** : all entries along this level are stored but not linearized.
    - **compressed** : only nonzeros along this level are stored
    - **loose_compressed** : as compressed, but allows for free space between regions
    - **singleton** : a variant of the compressed format, where coordinates have no siblings
    - **structured[n, m]** : the compression uses an n:m encoding
      (viz. n out of m consecutive elements are nonzero)

    For a compressed level, each position interval is represented in a compact
    way with a lowerbound `pos(i)` and an upperbound `pos(i+1) - 1`, which implies
    that successive intervals must appear in order without any "holes" in between
    them. The loose compressed format relaxes these constraints by representing each
    position interval with a lowerbound `lo(i)` and an upperbound `hi(i)`, which
    allows intervals to appear in arbitrary order and with elbow room between them.

    By default, each level-type has the property of being unique (no duplicate
    coordinates at that level) and ordered (coordinates appear sorted at that
    level). For singleton levels, the coordinates are fused with their parents in AoS
    (array of structures) scheme. The following properties can be added to a level-format
    to change this default behavior:

    - **nonunique** : duplicate coordinates may appear at the level
    - **nonordered** : coordinates may appear in arbitrary order
    - **soa** : only applicable to singleton levels, fuses the singleton
      level in SoA (structure of arrays) scheme.

    In addition to the map, the following fields are optional:

    - The required bitwidth for position storage (integral offsets
      into the sparse storage scheme).  A narrow width reduces the memory
      footprint of overhead storage, as long as the width suffices to
      define the total required range (viz. the maximum number of stored
      entries over all indirection levels).  The choices are `8`, `16`,
      `32`, `64`, or, the default, `0` to indicate the native bitwidth.

    - The required bitwidth for coordinate storage (the coordinates
      of stored entries).  A narrow width reduces the memory footprint
      of overhead storage, as long as the width suffices to define
      the total required range (viz. the maximum value of each tensor
      coordinate over all levels).  The choices are `8`, `16`, `32`,
      `64`, or, the default, `0` to indicate a native bitwidth.

    - The explicit value for the sparse tensor. If explicitVal is set,
      then all the non-zero values in the tensor have the same explicit value.
      The default value Attribute() indicates that it is not set. This
      is useful for binary-valued sparse tensors whose values can either
      be an implicit value (0 by default) or an explicit value (such as 1).
      In this approach, we don't store explicit/implicit values, and metadata
      (such as position and coordinate arrays) alone fully defines the original tensor.
      This yields additional savings for the storage requirements,
      as well as for the computational time, since we skip operating on
      implicit values and can constant fold the explicit values where they are used.

    - The implicit value for the sparse tensor. If implicitVal is set,
      then the "zero" value in the tensor is equal to the implicit value.
      For now, we only support `0` as the implicit value but it could be
      extended in the future. The default value Attribute() indicates that
      the implicit value is `0` (same type as the tensor element type).

    Examples:

    ```mlir
    // Sparse vector.
    #SparseVector = #sparse_tensor.encoding<{
      map = (i) -> (i : compressed)
    }>
    ... tensor<?xf32, #SparseVector> ...

    // Sorted coordinate scheme (arranged in AoS format by default).
    #SortedCOO = #sparse_tensor.encoding<{
      map = (i, j) -> (i : compressed(nonunique), j : singleton)
    }>
    // coordinates = {x_crd, y_crd}[nnz]
    ... tensor<?x?xf64, #SortedCOO> ...

    // Sorted coordinate scheme (arranged in SoA format).
    #SortedCOO = #sparse_tensor.encoding<{
      map = (i, j) -> (i : compressed(nonunique), j : singleton(soa))
    }>
    // coordinates = {x_crd[nnz], y_crd[nnz]}
    ... tensor<?x?xf64, #SortedCOO> ...

    // Batched sorted coordinate scheme, with loose compressed encoding.
    #BCOO = #sparse_tensor.encoding<{
      map = (i, j, k) -> (i : dense, j : loose_compressed(nonunique), k : singleton)
    }>
    ... tensor<10x10x10xf32, #BCOO> ...

    // Compressed sparse row.
    #CSR = #sparse_tensor.encoding<{
      map = (i, j) -> (i : dense, j : compressed)
    }>
    ... tensor<100x100xbf16, #CSR> ...

    // Doubly compressed sparse column storage with specific bitwidths.
    #DCSC = #sparse_tensor.encoding<{
      map = (i, j) -> (j : compressed, i : compressed),
      posWidth = 32,
      crdWidth = 8
    }>
    ... tensor<8x8xf64, #DCSC> ...

    // Doubly compressed sparse column storage with specific
    // explicit and implicit values.
    #DCSC = #sparse_tensor.encoding<{
      map = (i, j) -> (j : compressed, i : compressed),
      explicitVal = 1 : i64,
      implicitVal = 0 : i64
    }>
    ... tensor<8x8xi64, #DCSC> ...

    // Block sparse row storage (2x3 blocks).
    #BSR = #sparse_tensor.encoding<{
      map = ( i, j ) ->
      ( i floordiv 2 : dense,
        j floordiv 3 : compressed,
        i mod 2      : dense,
        j mod 3      : dense
      )
    }>
    ... tensor<20x30xf32, #BSR> ...

    // Same block sparse row storage (2x3 blocks) but this time
    // also with a redundant reverse mapping, which can be inferred.
    #BSR_explicit = #sparse_tensor.encoding<{
      map = { ib, jb, ii, jj }
            ( i = ib * 2 + ii,
              j = jb * 3 + jj) ->
      ( ib = i floordiv 2 : dense,
        jb = j floordiv 3 : compressed,
        ii = i mod 2 : dense,
        jj = j mod 3 : dense)
    }>
    ... tensor<20x30xf32, #BSR_explicit> ...

    // ELL format.
    // In the simple format for matrix, one array stores values and another
    // array stores column indices. The arrays have the same number of rows
    // as the original matrix, but only have as many columns as
    // the maximum number of nonzeros on a row of the original matrix.
    // There are many variants for ELL such as jagged diagonal scheme.
    // To implement ELL, map provides a notion of "counting a
    // dimension", where every stored element with the same coordinate
    // is mapped to a new slice. For instance, ELL storage of a 2-d
    // tensor can be defined with the mapping (i, j) -> (#i, i, j)
    // using the notation of [Chou20]. Lacking the # symbol in MLIR's
    // affine mapping, we use a free symbol c to define such counting,
    // together with a constant that denotes the number of resulting
    // slices. For example, the mapping [c](i, j) -> (c * 3 * i, i, j)
    // with the level-types ["dense", "dense", "compressed"] denotes ELL
    // storage with three jagged diagonals that count the dimension i.
    #ELL = #sparse_tensor.encoding<{
      map = [c](i, j) -> (c * 3 * i : dense, i : dense, j : compressed)
    }>
    ... tensor<?x?xf64, #ELL> ...

    // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
    // offset = 0, size = 8, and a dynamic stride on the second dimension).
    #CSR_SLICE = #sparse_tensor.encoding<{
      map = (i : #sparse_tensor<slice(0, 4, 1)>,
             j : #sparse_tensor<slice(0, 8, ?)>) ->
            (i : dense, j : compressed)
    }>
    ... tensor<?x?xf64, #CSR_SLICE> ...

    ```
  }];

  //
  // Data in sparse tensor encoding.
  //
  let parameters = (
    ins
    // A level-type for each level of the sparse storage
    // (consists of a level-format combined with level-properties).
    ArrayRefParameter<
      "::mlir::sparse_tensor::LevelType",
      "level-types"
      >: $lvlTypes,

    // A mapping from dimension-coordinates to level-coordinates.
    "AffineMap":$dimToLvl,

    // A mapping from level-coordinates to dimension-coordinates.
    "AffineMap":$lvlToDim,

    // The required bitwidth for position storage.
    "unsigned":$posWidth,

    // The required bitwidth for coordinate storage.
    "unsigned":$crdWidth,

    // The required explicit value.
    "::mlir::Attribute":$explicitVal,

    // The required implicit value.
    "::mlir::Attribute":$implicitVal,

    // A slice attribute for each dimension of the tensor type.
    ArrayRefParameter<
      "::mlir::sparse_tensor::SparseTensorDimSliceAttr",
      "per dimension slice metadata"
      >: $dimSlices
  );

  let builders = [
    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::LevelType>":$lvlTypes,
                     CArg<"AffineMap", "{}">:$dimToLvl,
                     CArg<"AffineMap", "{}">:$lvlToDim,
                     CArg<"unsigned", "0">:$posWidth,
                     CArg<"unsigned", "0">:$crdWidth,
                     CArg<"::mlir::Attribute", "{}">:$explicitVal,
                     CArg<"::mlir::Attribute", "{}">:$implicitVal), [{
      // Default dimToLvl to the identity mapping, and infer lvlToDim from
      // dimToLvl when not explicitly provided. Dimension slices default
      // to none.
      if (!dimToLvl) {
        dimToLvl = ::mlir::AffineMap::getMultiDimIdentityMap(lvlTypes.size(), $_ctxt);
      }
      if (!lvlToDim) {
        lvlToDim = ::mlir::sparse_tensor::inferLvlToDim(dimToLvl, $_ctxt);
      }
      return $_get($_ctxt, lvlTypes, dimToLvl, lvlToDim, posWidth, crdWidth,
        explicitVal, implicitVal,
        ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr>{});
    }]>
  ];

  let extraClassDeclaration = [{
    //
    // Factory methods.
    //

    /// Constructs a new encoding with the given dimToLvl mapping,
    /// and all other fields inherited from `this`.
    SparseTensorEncodingAttr withDimToLvl(AffineMap dimToLvl) const;
    SparseTensorEncodingAttr withDimToLvl(SparseTensorEncodingAttr enc) const;

    /// Constructs a new encoding with dimToLvl reset to the default/identity,
    /// and all other fields inherited from `this`.
    SparseTensorEncodingAttr withoutDimToLvl() const;

    /// Constructs a new encoding with the given pointer and index
    /// bitwidths, and all other fields inherited from `this`.
    SparseTensorEncodingAttr withBitWidths(unsigned posWidth, unsigned crdWidth) const;

    /// Constructs a new encoding with the pointer and index bitwidths
    /// reset to the default, and all other fields inherited from `this`.
    SparseTensorEncodingAttr withoutBitWidths() const;

    /// Constructs a new encoding with the given explicit value
    /// and all other fields inherited from `this`.
    SparseTensorEncodingAttr withExplicitVal(Attribute explicitVal) const;

    /// Constructs a new encoding with the explicit value
    /// reset to the default, and all other fields inherited from `this`.
    SparseTensorEncodingAttr withoutExplicitVal() const;

    /// Constructs a new encoding with the given implicit value
    /// and all other fields inherited from `this`.
    SparseTensorEncodingAttr withImplicitVal(Attribute implicitVal) const;

    /// Constructs a new encoding with the implicit value
    /// reset to the default, and all other fields inherited from `this`.
    SparseTensorEncodingAttr withoutImplicitVal() const;

    /// Constructs a new encoding with the given dimSlices, and all
    /// other fields inherited from `this`.
    SparseTensorEncodingAttr withDimSlices(ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;

    /// Constructs a new encoding with the dimSlices reset to the default,
    /// and all other fields inherited from `this`.
    SparseTensorEncodingAttr withoutDimSlices() const;

    //
    // Rank methods.
    //

    /// Returns the expected number of tensor dimensions.  Asserts that
    /// the encoding is non-null (since no fixed result is valid for every
    /// dense-tensor).
    ::mlir::sparse_tensor::Dimension getDimRank() const;

    /// Returns the number of storage levels.  Asserts that the encoding
    /// is non-null (since no fixed result is valid for every dense-tensor).
    ::mlir::sparse_tensor::Level getLvlRank() const;

    /// Returns the number of leading batch levels (see the `batch`
    /// level-format above).
    uint64_t getBatchLvlRank() const;

    //
    // lvlTypes methods.
    //

    /// Safely looks up the level-type for the requested level.  (Returns
    /// `LevelType::Dense` for the null encoding, since dense-tensors
    /// are always all-dense.)
    ::mlir::sparse_tensor::LevelType getLvlType(::mlir::sparse_tensor::Level l) const;

    /// Convenience queries over the level-type of the requested level.
    bool isDenseLvl(::mlir::sparse_tensor::Level l) const { return isDenseLT(getLvlType(l)); }
    bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedLT(getLvlType(l)); }
    bool isSingletonLvl(::mlir::sparse_tensor::Level l) const { return isSingletonLT(getLvlType(l)); }
    bool isLooseCompressedLvl(::mlir::sparse_tensor::Level l) const { return isLooseCompressedLT(getLvlType(l)); }
    bool isNOutOfMLvl(::mlir::sparse_tensor::Level l) const { return isNOutOfMLT(getLvlType(l)); }
    bool isOrderedLvl(::mlir::sparse_tensor::Level l) const { return isOrderedLT(getLvlType(l)); }
    bool isUniqueLvl(::mlir::sparse_tensor::Level l) const { return isUniqueLT(getLvlType(l)); }

    /// Returns true if every level is dense.  Also returns true for
    /// the null encoding (since dense-tensors are always all-dense).
    bool isAllDense() const;

    /// Returns true if every level is ordered.  Also returns true for
    /// the null encoding (since dense-tensors are always all-ordered).
    bool isAllOrdered() const;

    //
    // Storage type methods.
    //

    /// Returns the coordinate-overhead MLIR type, defaulting to `IndexType`.
    Type getCrdElemType() const;

    /// Returns the position-overhead MLIR type, defaulting to `IndexType`.
    Type getPosElemType() const;

    /// Returns the coordinate-memref MLIR type, an optional tensorDimShape is
    /// used to refine the leading batch dimensions (if any).
    MemRefType getCrdMemRefType(
      std::optional<ArrayRef<int64_t>> tensorDimShape = std::nullopt) const;

    /// Returns the position-memref MLIR type, an optional tensorDimShape is
    /// used to refine the leading batch dimensions (if any).
    MemRefType getPosMemRefType(
      std::optional<ArrayRef<int64_t>> tensorDimShape = std::nullopt) const;

    //
    // dimToLvl methods.
    //

    /// Returns true if the dimToLvl mapping is the identity.
    /// Also returns true for the null encoding (since dense-tensors
    /// always have the identity mapping).
    bool isIdentity() const;

    /// Returns true if the dimToLvl mapping is a permutation.
    /// Also returns true for the null encoding (since dense-tensors
    /// always have the identity mapping).
    bool isPermutation() const;

    //
    // dimSlices methods.
    //

    /// Returns true if the encoding carries any per-dimension slice metadata.
    bool isSlice() const;

    /// Returns the slice metadata for the requested dimension.
    ::mlir::sparse_tensor::SparseTensorDimSliceAttr getDimSlice(::mlir::sparse_tensor::Dimension dim) const;

    /// Per-dimension/per-level slice component lookups; std::nullopt
    /// indicates a dynamic value (see SparseTensorDimSliceAttr).
    std::optional<uint64_t> getStaticDimSliceOffset(::mlir::sparse_tensor::Dimension dim) const;
    std::optional<uint64_t> getStaticDimSliceStride(::mlir::sparse_tensor::Dimension dim) const;
    std::optional<uint64_t> getStaticLvlSliceOffset(::mlir::sparse_tensor::Level lvl) const;
    std::optional<uint64_t> getStaticLvlSliceStride(::mlir::sparse_tensor::Level lvl) const;

    //
    // Helper function to translate between level/dimension space.
    //

    SmallVector<int64_t> translateShape(::mlir::ArrayRef<int64_t> srcShape, ::mlir::sparse_tensor::CrdTransDirectionKind) const;
    ValueRange translateCrds(::mlir::OpBuilder &builder, ::mlir::Location loc, ::mlir::ValueRange crds, ::mlir::sparse_tensor::CrdTransDirectionKind) const;

    //
    // COO methods.
    //

    /// Returns the starting level of this sparse tensor type for a
    /// trailing COO region that spans **at least** two levels. If
    /// no such COO region is found, then returns the level-rank.
    ///
    /// DEPRECATED: use getCOOSegments instead.
    Level getAoSCOOStart() const;

    /// Returns a list of COO segments in the sparse tensor types.
    SmallVector<COOSegment> getCOOSegments() const;

    //
    // Printing methods.
    //

    void printSymbols(AffineMap &map, AsmPrinter &printer) const;
    void printDimensions(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::LevelType> lvlTypes) const;
  }];

  let genVerifyDecl = 1;
  let hasCustomAssemblyFormat = 1;
}
551
552//===----------------------------------------------------------------------===//
553// Sparse Tensor Storage Specifier Enum Attribute.
554//===----------------------------------------------------------------------===//
555
// The C++ enum for Storage Specifier kind.
def SparseTensorStorageSpecifierKindEnum
    : I32EnumAttr<"StorageSpecifierKind", "sparse tensor storage specifier kind", [
        I32EnumAttrCase<"LvlSize",    0, "lvl_sz">,
        I32EnumAttrCase<"PosMemSize", 1, "pos_mem_sz">,
        I32EnumAttrCase<"CrdMemSize", 2, "crd_mem_sz">,
        I32EnumAttrCase<"ValMemSize", 3, "val_mem_sz">,
        I32EnumAttrCase<"DimOffset",  4, "dim_offset">,
        I32EnumAttrCase<"DimStride",  5, "dim_stride">,
      ]> {
  // Generate only the C++ enum; the attribute wrapper is defined
  // separately below via EnumAttr.
  let genSpecializedAttr = 0;
  let cppNamespace = SparseTensor_Dialect.cppNamespace;
}
569
// Define the enum StorageSpecifier kind attribute
// (printed/parsed with the `kind` mnemonic).
def SparseTensorStorageSpecifierKindAttr
    : EnumAttr<SparseTensor_Dialect, SparseTensorStorageSpecifierKindEnum,
               "SparseTensorStorageSpecifierKind"> {
   let mnemonic = "kind";
}
576
577//===----------------------------------------------------------------------===//
578// Sparse Tensor Traits.
579//===----------------------------------------------------------------------===//
580
// Holds when the type carries a (non-null) sparse tensor encoding.
def IsSparseTensorPred
  : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self)">;
583
// Holds when the type carries a sparse tensor encoding whose
// encoding has per-dimension slice metadata (see isSlice).
def IsSparseTensorSlicePred
  : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self) && "
          "  ::mlir::sparse_tensor::getSparseTensorEncoding($_self).isSlice()">;
587
// A ranked tensor of the given element types that has a sparse encoding.
class SparseTensorOf<list<Type> allowedTypes>
  : RankedTensorOf<allowedTypes, [IsSparseTensorPred], "sparse tensor">;
590
// A ranked tensor of the given element types whose sparse encoding
// carries slice metadata.
class SparseTensorSliceOf<list<Type> allowedTypes>
  : RankedTensorOf<allowedTypes, [IsSparseTensorSlicePred], "sparse tensor slice">;
593
// Either a 0-D tensor of the given element types or a bare value
// of one of those types.
class ScalarLikeOf<list<Type> allowedTypes>
  : AnyTypeOf<[0DTensorOf<allowedTypes>, AnyTypeOf<allowedTypes>], "scalar like">;
596
// Commonly-used instantiations of the constraint classes above.
def AnySparseTensor : SparseTensorOf<[AnyType]>;
def AnySparseTensorSlice : SparseTensorSliceOf<[AnyType]>;
def AnyIndexingScalarLike : ScalarLikeOf<[AnySignlessIntegerOrIndex]>;
600
601//===----------------------------------------------------------------------===//
602// Sparse Tensor Sorting Algorithm Attribute.
603//===----------------------------------------------------------------------===//
604
// Currently, we only provide four implementations, and expose the
// implementations via an algorithm attribute. In the future, if we need
// to support both stable and non-stable quick sort, we may add a
// quick_sort_nonstable enum to the attribute. Alternatively, we may use
// two attributes, (stable|nonstable, algorithm), to specify a sorting
// implementation.
//
// --------------------------------------------------------------------------
// |           | hybrid_qsort| insertion_sort | qsort       | heap_sort     |
// |non-stable | Impl        | X              |  Impl       | Impl          |
// |stable     | X           | Impl           |  Not Impl   | X             |
// --------------------------------------------------------------------------
617
// The C++ enum for sparse tensor sort kind.
def SparseTensorSortKindEnum
    : I32EnumAttr<"SparseTensorSortKind", "sparse tensor sort algorithm", [
        I32EnumAttrCase<"HybridQuickSort",    0, "hybrid_quick_sort">,
        I32EnumAttrCase<"InsertionSortStable", 1, "insertion_sort_stable">,
        I32EnumAttrCase<"QuickSort", 2, "quick_sort">,
        I32EnumAttrCase<"HeapSort", 3, "heap_sort">,
      ]> {
  // Generate only the C++ enum; the attribute wrapper is defined
  // separately below via EnumAttr.
  let genSpecializedAttr = 0;
  let cppNamespace = SparseTensor_Dialect.cppNamespace;
}
629
// Define the enum sparse tensor sort kind attribute.
def SparseTensorSortKindAttr
    : EnumAttr<SparseTensor_Dialect, SparseTensorSortKindEnum,
               "SparseTensorSortAlgorithm"> {
}
635
636
637//===----------------------------------------------------------------------===//
638// Sparse Tensor Coordinate Translation Direction Attribute.
639//===----------------------------------------------------------------------===//
640
// The C++ enum for the sparse tensor coordinate translation direction.
def SparseTensorCrdTransDirectionEnum
    : I32EnumAttr<"CrdTransDirectionKind", "sparse tensor coordinate translation direction", [
        I32EnumAttrCase<"dim2lvl", 0, "dim_to_lvl">,
        I32EnumAttrCase<"lvl2dim", 1, "lvl_to_dim">,
      ]> {
  // Generate only the C++ enum; the attribute wrapper is defined
  // separately below via EnumAttr.
  let genSpecializedAttr = 0;
  let cppNamespace = SparseTensor_Dialect.cppNamespace;
}
650
// Define the sparse tensor coordinate translation direction attribute.
def SparseTensorCrdTransDirectionAttr
    : EnumAttr<SparseTensor_Dialect, SparseTensorCrdTransDirectionEnum,
               "CrdTransDirection"> {
}
656
657#endif // SPARSETENSOR_ATTRDEFS
658