//===-- Passes.td - Sparse tensor pass definition file -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES

include "mlir/Pass/PassBase.td"

def SparseAssembler : Pass<"sparse-assembler", "ModuleOp"> {
  let summary = "Add [dis]assemble operations on external sparse tensors";
  let description = [{
    Unlike dense tensors, MLIR does **not** provide a direct `_mlir_ciface_`
    ABI for passing sparse tensors as arguments from and to external methods
    (within MLIR-generated methods, sparse tensors can be freely passed
    around, but this eventually uses a bespoke parameter passing format that
    is subject to change, such as opaque pointers when the sparse runtime
    support library is used, or the constituent arrays and structs for
    direct IR codegen). The sparse assembler pass, however, can be used
    to obtain a stable `_mlir_ciface_` API for passing sparse tensors
    from and to an external environment, such as Python, PyTorch, or JAX.

    The pass converts public entry methods that use sparse tensors as
    input parameters and/or output return values into wrapper methods
    that [dis]assemble the individual tensors constituting the actual
    storage used externally into and from MLIR sparse tensors. This pass
    can be used to prepare the public entry methods of a program compiled
    by the MLIR sparsifier to interface with an external runtime, e.g.,
    when passing sparse tensors as numpy arrays from and to Python. Note
    that eventual bufferization decisions (e.g., who [de]allocates the
    underlying memory) should be resolved in agreement with the external
    runtime.

    By default, the pass uses the [dis]assemble operations for both inputs
    and outputs of sparse tensors. When the direct-out option is set,
    however, outputs directly return the MLIR-allocated buffers to the
    external runtime.

    The pass should always run before the actual sparsification passes.
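
    As a rough illustration (a hand-written sketch rather than verbatim pass
    output; the wrapper name `_internal_foo`, the `#CSR` encoding, and the
    exact assemble syntax are merely assumptions for exposition), a public
    method taking a CSR matrix could be wrapped along these lines:

    ```mlir
    // Wrapper with a stable ABI: takes the constituent positions,
    // coordinates, and values arrays, assembles them into a sparse
    // tensor, and forwards to the original (renamed) method.
    func.func @foo(%pos: tensor<?xindex>, %crd: tensor<?xindex>,
                   %val: tensor<?xf64>) -> tensor<?xf64> {
      %a = sparse_tensor.assemble (%pos, %crd), %val
         : (tensor<?xindex>, tensor<?xindex>), tensor<?xf64>
         to tensor<10x10xf64, #CSR>
      %0 = call @_internal_foo(%a)
         : (tensor<10x10xf64, #CSR>) -> tensor<?xf64>
      return %0 : tensor<?xf64>
    }
    ```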
  }];
  let constructor = "mlir::createSparseAssembler()";
  let dependentDialects = [
    "bufferization::BufferizationDialect",
    "sparse_tensor::SparseTensorDialect",
    "tensor::TensorDialect",
  ];
  let options = [
    Option<"directOut", "direct-out", "bool",
      "false", "Directly returns buffers externally">,
  ];
}

def SparseReinterpretMap : Pass<"sparse-reinterpret-map", "ModuleOp"> {
  let summary = "Reinterprets sparse tensor type mappings";
  let description = [{
    A pass that reinterprets the mappings in all sparse tensor types in a
    way that enables subsequent sparsification. This involves expressing all
    `linalg.generic` operations in terms of level coordinates (rather than
    the dimension coordinates of the input tensors) to align the iteration
    space with the potentially remapped level space, as well as resolving
    cycles in the resulting iteration graphs with explicit sparse tensor
    conversions where needed.
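
    As a rough sketch (illustrative only; the concrete `#BSR` and `#DSDD`
    encodings below are assumptions, not output of the pass), demapping a
    block-sparse operand into its level space can be expressed with the
    `sparse_tensor.reinterpret_map` operation:

    ```mlir
    #BSR = #sparse_tensor.encoding<{
      map = (i, j) -> (i floordiv 2 : dense,
                       j floordiv 3 : compressed,
                       i mod 2      : dense,
                       j mod 3      : dense)
    }>
    #DSDD = #sparse_tensor.encoding<{
      map = (i, j, k, l) -> (i : dense, j : compressed, k : dense, l : dense)
    }>
    // Reinterpret the 2-d block-sparse matrix as a 4-d tensor over its levels.
    %t1 = sparse_tensor.reinterpret_map %t0
        : tensor<6x12xi32, #BSR> to tensor<3x4x2x3xi32, #DSDD>
    ```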
  }];
  let constructor = "mlir::createSparseReinterpretMapPass()";
  let dependentDialects = [
    "affine::AffineDialect",
    "linalg::LinalgDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  let options = [
    Option<"scope", "scope", "mlir::ReinterpretMapScope",
       "mlir::ReinterpretMapScope::kAll",
       "Set the reinterpretation scope", [{llvm::cl::values(
         clEnumValN(mlir::ReinterpretMapScope::kAll, "all",
                    "Run on every applicable operation."),
         clEnumValN(mlir::ReinterpretMapScope::kGenericOnly,
                    "only-generic",
                    "Run only on linalg.generic operations."),
         clEnumValN(mlir::ReinterpretMapScope::kExceptGeneric,
                    "except-generic",
                    "Run on operations except linalg.generic (e.g., foreach)."))}]>,
  ];
}

def PreSparsificationRewrite : Pass<"pre-sparsification-rewrite", "ModuleOp"> {
  let summary = "Applies sparse tensor rewriting rules prior to sparsification";
  let description = [{
    A pass that applies rewriting rules to sparse tensor operations prior
    to running the actual sparsification pass.
  }];
  let constructor = "mlir::createPreSparsificationRewritePass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}

def SparsificationPass : Pass<"sparsification", "ModuleOp"> {
  let summary = "Automatically generate sparse tensor code from sparse tensor types";
  let description = [{
    A pass that implements the core functionality of a **sparsifier**.
    Each Linalg operation (MLIR's tensor index notation) that operates on
    sparse tensor types is converted into code in which the sparsity is
    explicit, both in the co-iterating loop logic and in the selected
    sparse storage schemes.

    See the `SparseTensor` dialect documentation for more background.

    Example input:

    ```mlir
    #matvec = {
      indexing_maps = [
        affine_map<(i,j) -> (i,j)>, // A
        affine_map<(i,j) -> (j)>,   // b
        affine_map<(i,j) -> (i)>    // x (out)
      ],
      iterator_types = ["parallel", "reduction"],
      doc = "x(i) += A(i,j) * b(j)"
    }

    // Multiply a sparse matrix A with a dense vector b into a dense vector x.
    func.func @kernel_matvec(%arga: tensor<?x?xf64, #SparseMatrix>,
                             %argb: tensor<?xf64>,
                             %argx: tensor<?xf64>) -> tensor<?xf64> {
      %0 = linalg.generic #matvec
        ins(%arga, %argb: tensor<?x?xf64, #SparseMatrix>, tensor<?xf64>)
        outs(%argx: tensor<?xf64>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %0 = arith.mulf %a, %b : f64
          %1 = arith.addf %x, %0 : f64
          linalg.yield %1 : f64
      } -> tensor<?xf64>
      return %0 : tensor<?xf64>
    }
    ```
  }];
  let constructor = "mlir::createSparsificationPass()";
  let dependentDialects = [
    "affine::AffineDialect",
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "LLVM::LLVMDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  // TODO(57514): These enum options are duplicated in Passes.h.
  let options = [
    Option<"parallelization", "parallelization-strategy", "mlir::SparseParallelizationStrategy",
           "mlir::SparseParallelizationStrategy::kNone",
           "Set the parallelization strategy", [{llvm::cl::values(
             clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
                        "Turn off sparse parallelization."),
             clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
                        "dense-outer-loop",
                        "Enable dense outer loop sparse parallelization."),
             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
                        "any-storage-outer-loop",
                        "Enable sparse parallelization regardless of storage for the outer loop."),
             clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
                        "dense-any-loop",
                        "Enable dense parallelization for any loop."),
             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
                        "any-storage-any-loop",
                        "Enable sparse parallelization for any storage and loop."))}]>,
    Option<"sparseEmitStrategy", "sparse-emit-strategy", "mlir::SparseEmitStrategy",
           "mlir::SparseEmitStrategy::kFunctional",
           "Emit functional code or debug interfaces for sparse loops", [{llvm::cl::values(
             clEnumValN(mlir::SparseEmitStrategy::kFunctional, "functional",
                        "Emit functional code (with scf.for/while)."),
             clEnumValN(mlir::SparseEmitStrategy::kSparseIterator, "sparse-iterator",
                        "Emit (experimental) loops (with sparse_tensor.iterate)."),
             clEnumValN(mlir::SparseEmitStrategy::kDebugInterface, "debug-interface",
                        "Emit non-functional but easy-to-read interfaces for debugging."))}]>,
    Option<"enableRuntimeLibrary", "enable-runtime-library", "bool",
           "true", "Enable runtime library for manipulating sparse tensors">,
  ];
}

def StageSparseOperations : Pass<"stage-sparse-ops", "func::FuncOp"> {
  let summary = "Decompose a complex sparse operation into multiple stages";
  let description = [{
    A pass that decomposes a complex sparse operation into multiple stages.
    E.g., CSR -> CSC is staged into CSR -> COO (unordered) -> sort -> CSC.
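
    A schematic sketch (assuming `#CSR`, `#CSC`, and an unordered `#COO`
    encoding are defined elsewhere; the intermediate steps are shown
    conceptually, not as verbatim pass output):

    ```mlir
      Before:
        %csc = sparse_tensor.convert %csr
             : tensor<?x?xf64, #CSR> to tensor<?x?xf64, #CSC>

      After (conceptually):
        %coo = sparse_tensor.convert %csr
             : tensor<?x?xf64, #CSR> to tensor<?x?xf64, #COO>
        // ... sort the unordered COO coordinates ...
        %csc = sparse_tensor.convert %coo
             : tensor<?x?xf64, #COO> to tensor<?x?xf64, #CSC>
    ```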
  }];
  let constructor = "mlir::createStageSparseOperationsPass()";
  let dependentDialects = [
    "sparse_tensor::SparseTensorDialect",
  ];
}

def LowerSparseOpsToForeach : Pass<"lower-sparse-ops-to-foreach", "ModuleOp"> {
  let summary = "Lowers high-level sparse operations to sparse_tensor.foreach";
  let description = [{
    A pass that lowers high-level sparse operations to sparse_tensor.foreach.
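
    A rough sketch (conceptual only; the `#CSR` encoding and the elided
    insertion logic are assumptions rather than verbatim pass output) of
    lowering a dense-to-sparse conversion through `sparse_tensor.foreach`:

    ```mlir
      Before:
        %1 = sparse_tensor.convert %0
           : tensor<?x?xf64> to tensor<?x?xf64, #CSR>

      After (conceptually):
        // Visit every element of the source and insert the nonzero ones
        // into a freshly allocated sparse tensor (details elided).
        sparse_tensor.foreach in %0 : tensor<?x?xf64> do {
          ^bb0(%i: index, %j: index, %v: f64):
            // ... insert %v at (%i, %j) into the new #CSR tensor ...
        }
    ```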
  }];
  let constructor = "mlir::createLowerSparseOpsToForeachPass()";
  let dependentDialects = [
    "affine::AffineDialect",
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  let options = [
    Option<"enableRuntimeLibrary", "enable-runtime-library", "bool",
           "true", "Enable runtime library for manipulating sparse tensors">,
    Option<"enableConvert", "enable-convert", "bool",
           "true", "Enable rewriting rules for the convert operator">,
  ];
}

def LowerForeachToSCF : Pass<"lower-sparse-foreach-to-scf", "func::FuncOp"> {
  let summary = "Lower the sparse_tensor.foreach operation to SCF loops";
  let description = [{
    A pass that lowers the sparse_tensor.foreach operation to the scf dialect.
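
    A rough sketch (conceptual only; the `#SparseVector` encoding, the loop
    bounds, and the buffer names are assumptions, and the exact ops used to
    obtain the underlying buffers are elided):

    ```mlir
      Before:
        sparse_tensor.foreach in %a : tensor<?xf64, #SparseVector> do {
          ^bb0(%i: index, %v: f64):
            // ... body ...
        }

      After (conceptually):
        // Walk the stored elements through the coordinates/values buffers
        // of the sparse vector.
        scf.for %p = %lo to %hi step %c1 {
          %i = memref.load %coordinates[%p] : memref<?xindex>
          %v = memref.load %values[%p] : memref<?xf64>
          // ... body ...
        }
    ```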
  }];
  let constructor = "mlir::createLowerForeachToSCFPass()";
  let dependentDialects = [
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}

def SparseTensorConversionPass : Pass<"sparse-tensor-conversion", "ModuleOp"> {
  let summary = "Convert sparse tensors and primitives to library calls";
  let description = [{
    A pass that converts sparse tensor primitives into calls to a runtime
    support library. Sparse tensor types are converted into opaque pointers
    to the underlying sparse storage schemes.

    The use of opaque pointers together with a runtime support library keeps
    the conversion relatively simple, but at the expense of making the IR
    opaque, which obscures opportunities for subsequent optimization of the
    IR. An alternative is provided by the SparseTensorCodegen pass.

    Example of the conversion:

    ```mlir
      Before:
        func.func @foo(%arg0: tensor<8x8xf32, #CSR>) -> memref<?xindex> {
          %0 = sparse_tensor.pointers %arg0 {dimension = 1 : index}
             : tensor<8x8xf32, #CSR> to memref<?xindex>
          return %0 : memref<?xindex>
        }

      After:
        func.func @foo(%arg0: !llvm.ptr) -> memref<?xindex> {
          %c1 = arith.constant 1 : index
          %0 = call @sparsePointers0(%arg0, %c1)
             : (!llvm.ptr, index) -> memref<?xindex>
          return %0 : memref<?xindex>
        }
    ```
  }];
  let constructor = "mlir::createSparseTensorConversionPass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "LLVM::LLVMDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}

def SparseTensorCodegen : Pass<"sparse-tensor-codegen", "ModuleOp"> {
  let summary = "Convert sparse tensors and primitives to actual code";
  let description = [{
    A pass that converts sparse tensor types and primitives to actual
    compiler-visible buffers and compiler IR that implements these
    primitives on the selected sparse tensor storage schemes.

    This pass provides an alternative to the SparseTensorConversion pass,
    eliminating the dependence on a runtime support library and providing
    many more opportunities for subsequent compiler optimization of the
    generated code.

    Example of the conversion:

    ```mlir
      Before:
        func.func @foo(%arg0: tensor<8x8xf32, #CSR>) -> memref<?xindex> {
          %0 = sparse_tensor.pointers %arg0 {dimension = 1 : index}
             : tensor<8x8xf32, #CSR> to memref<?xindex>
          return %0 : memref<?xindex>
        }

      After:
        func.func @foo(%arg0: memref<2xindex>,
                       %arg1: memref<3xindex>,
                       %arg2: memref<?xindex>,
                       %arg3: memref<?xindex>,
                       %arg4: memref<?xf32>) -> memref<?xindex> {
          return %arg2 : memref<?xindex>
        }
    ```
  }];
  let constructor = "mlir::createSparseTensorCodegenPass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  let options = [
    Option<"enableBufferInitialization", "enable-buffer-initialization", "bool",
           "false", "Enable zero-initialization of the memory buffers">,
    Option<"createSparseDeallocs", "create-sparse-deallocs", "bool",
           "true", "Specify whether the temporary buffers created by the sparse "
                   "compiler should be deallocated, for compatibility with core "
                   "bufferization passes. "
                   "This option is only used when enable-runtime-library=false. "
                   "See also create-deallocs for BufferizationOption.">,
  ];
}


def SparseBufferRewrite : Pass<"sparse-buffer-rewrite", "ModuleOp"> {
  let summary = "Rewrite sparse primitives on buffers to actual code";
  let description = [{
    A pass that rewrites sparse primitives on buffers into their MLIR
    implementation. For example, the sparse_tensor.sort operator is expanded
    into actual code by this pass.
  }];
  let constructor = "mlir::createSparseBufferRewritePass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  let options = [
    Option<"enableBufferInitialization", "enable-buffer-initialization", "bool",
           "false", "Enable zero-initialization of the memory buffers">,
  ];
}

def SparseVectorization : Pass<"sparse-vectorization", "ModuleOp"> {
  let summary = "Vectorizes loops after sparsification";
  let description = [{
    A pass that converts loops after sparsification into vector loops.
    The vector dialect is used as the target to provide an architecture
    neutral way of exploiting any platform that supports SIMD instructions.

    The vector length (viz. `vl`) describes the number of packed data elements
    (e.g. both vector<16xf32> and vector<16xf64> have a vector length of 16 even
    though the actual bitwidths differ). A small multiple of the vector length
    actually supported in hardware typically results in efficient SIMD code,
    since the backend will map longer vectors to multiple vector registers,
    thereby effectively unrolling an additional level within the generated
    for-loop.

    Example of the conversion:

    ```mlir
      Before:
        %3 = memref.load %2[] : memref<f32>
        %4 = scf.for %arg3 = %c0 to %c1024 step %c1 iter_args(%arg4 = %3) -> (f32) {
          %6 = memref.load %0[%arg3] : memref<?xf32>
          %7 = memref.load %1[%arg3] : memref<1024xf32>
          %8 = arith.mulf %6, %7 : f32
          %9 = arith.addf %arg4, %8 : f32
          scf.yield %9 : f32
        }
        memref.store %4, %2[] : memref<f32>

      After:
        %3 = memref.load %2[] : memref<f32>
        %4 = vector.insertelement %3, %cst[%c0 : index] : vector<32xf32>
        %5 = scf.for %arg3 = %c0 to %c1024 step %c32 iter_args(%arg4 = %4) -> (vector<32xf32>) {
          %8 = vector.load %0[%arg3] : memref<?xf32>, vector<32xf32>
          %9 = vector.load %1[%arg3] : memref<1024xf32>, vector<32xf32>
          %10 = arith.mulf %8, %9 : vector<32xf32>
          %11 = arith.addf %arg4, %10 : vector<32xf32>
          scf.yield %11 : vector<32xf32>
        }
        %6 = vector.reduction <add>, %5 : vector<32xf32> into f32
        memref.store %6, %2[] : memref<f32>
    ```
  }];
  let constructor = "mlir::createSparseVectorizationPass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
    "vector::VectorDialect",
  ];
  let options = [
    Option<"vectorLength", "vl", "int32_t", "0",
           "Set the vector length (use 0 to disable vectorization)">,
    Option<"enableVLAVectorization", "enable-vla-vectorization", "bool",
           "false", "Enable vector length agnostic vectorization">,
    Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",
           "Enable i32 indexing into vectors (for efficient gather/scatter)">,
  ];
}

def SparseGPUCodegen : Pass<"sparse-gpu-codegen", "ModuleOp"> {
  let summary = "Generates GPU code during sparsification";
  let description = [{
    Enables the sparsifier to use GPU acceleration. When the number of GPU
    threads is set to zero, the pass tries to enable GPU acceleration by
    means of direct library calls (like cuSPARSE).
  }];
  let constructor = "mlir::createSparseGPUCodegenPass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "gpu::GPUDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
  let options = [
    Option<"numThreads", "num-threads", "int32_t", "1024", "Sets the number of GPU threads">,
    Option<"enableRuntimeLibrary", "enable-runtime-library", "bool",
           "true", "Enable runtime library for manipulating sparse tensors">,
  ];
}

def StorageSpecifierToLLVM : Pass<"sparse-storage-specifier-to-llvm", "ModuleOp"> {
  let summary = "Lower sparse storage specifier to llvm structure";
  let description = [{
     This pass rewrites sparse tensor storage specifier-related operations into
     the LLVM dialect, and converts the sparse tensor storage specifier into an
     llvm.struct.

     Example of the conversion:
     ```mlir
     Before:
       %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 0
       : !sparse_tensor.storage_specifier<#CSR> to i64

     After:
       %0 = llvm.extractvalue %arg0[0, 0] : !llvm.struct<(array<2 x i64>, array<3 x i64>)>
     ```
  }];
  let constructor = "mlir::createStorageSpecifierToLLVMPass()";
  let dependentDialects = [
    "arith::ArithDialect",
    "LLVM::LLVMDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}

def SparsificationAndBufferization : Pass<"sparsification-and-bufferization", "ModuleOp"> {
  let summary = "Mini-pipeline that combines bufferization and sparsification";
  let description = [{
     This pass forms a mini-pipeline that combines bufferization and sparsification.
  }];
  let constructor = "mlir::createSparsificationAndBufferizationPass()";
  let dependentDialects = [
    "affine::AffineDialect",
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "gpu::GPUDialect",
    "LLVM::LLVMDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
    "vector::VectorDialect"
  ];
  // Important optimization options are made visible to the mini-pipeline
  // so that clients can set these (when not using the full pipeline).
  let options = [
    Option<"vectorLength", "vl", "int32_t", "0",
           "Set the vector length (use 0 to disable vectorization)">,
    Option<"enableVLAVectorization", "enable-vla-vectorization", "bool", "false",
           "Enable vector length agnostic vectorization">,
    Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",
           "Enable i32 indexing into vectors (for efficient gather/scatter)">,
    Option<"enableGPULibgen", "enable-gpu-libgen", "bool", "false",
           "Enable GPU acceleration by means of direct library calls">,
    Option<"sparseEmitStrategy", "sparse-emit-strategy", "mlir::SparseEmitStrategy",
           "mlir::SparseEmitStrategy::kFunctional",
           "Emit functional code or debug interfaces for sparse loops", [{llvm::cl::values(
             clEnumValN(mlir::SparseEmitStrategy::kFunctional, "functional",
                        "Emit functional code (with scf.for/while)."),
             clEnumValN(mlir::SparseEmitStrategy::kSparseIterator, "sparse-iterator",
                        "Emit (experimental) loops (with sparse_tensor.iterate)."),
             clEnumValN(mlir::SparseEmitStrategy::kDebugInterface, "debug-interface",
                        "Emit non-functional but easy-to-read interfaces for debugging."))}]>,
    Option<"parallelization", "parallelization-strategy", "mlir::SparseParallelizationStrategy",
           "mlir::SparseParallelizationStrategy::kNone",
           "Set the parallelization strategy", [{llvm::cl::values(
             clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
                        "Turn off sparse parallelization."),
             clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
                        "dense-outer-loop",
                        "Enable dense outer loop sparse parallelization."),
             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
                        "any-storage-outer-loop",
                        "Enable sparse parallelization regardless of storage for the outer loop."),
             clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
                        "dense-any-loop",
                        "Enable dense parallelization for any loop."),
             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
                        "any-storage-any-loop",
                        "Enable sparse parallelization for any storage and loop."))}]>,
  ];
}

//===----------------------------------------------------------------------===//
// Sparse Iteration Transform Passes
//===----------------------------------------------------------------------===//

def SparseSpaceCollapse : Pass<"sparse-space-collapse", "func::FuncOp"> {
  let summary = "Sparse space collapsing pass";
  let description = [{
     This pass collapses consecutive sparse spaces (extracted from the same tensor)
     into one multi-dimensional space. The pass is not yet stabilized.
  }];
  let constructor = "mlir::createSparseSpaceCollapsePass()";
  let dependentDialects = [
    "sparse_tensor::SparseTensorDialect",
  ];
}

def LowerSparseIterationToSCF : Pass<"lower-sparse-iteration-to-scf", "func::FuncOp"> {
  let summary = "Lower sparse_tensor.iterate/coiterate into SCF loops";
  let description = [{
     This pass lowers `sparse_tensor.iterate` operations into `scf.for/while` operations.
     The pass is not yet stabilized.
  }];
  let constructor = "mlir::createLowerSparseIterationToSCFPass()";
  let dependentDialects = [
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}


#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES