xref: /llvm-project/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp (revision 8154494e28364f3eb85f2bba624f33224e4aed08)
1 //===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
10 
11 #include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
12 #include "mlir/Conversion/Passes.h"
13 #include "mlir/Dialect/Arith/Transforms/Passes.h"
14 #include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
15 #include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
16 #include "mlir/Dialect/Bufferization/Transforms/Passes.h"
17 #include "mlir/Dialect/Func/IR/FuncOps.h"
18 #include "mlir/Dialect/GPU/IR/GPUDialect.h"
19 #include "mlir/Dialect/GPU/Transforms/Passes.h"
20 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
21 #include "mlir/Dialect/Linalg/Passes.h"
22 #include "mlir/Dialect/MemRef/Transforms/Passes.h"
23 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
24 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
25 #include "mlir/Pass/PassManager.h"
26 #include "mlir/Transforms/Passes.h"
27 
28 //===----------------------------------------------------------------------===//
29 // Pipeline implementation.
30 //===----------------------------------------------------------------------===//
31 
/// Populates `pm` with the "sparse-compiler" pipeline: lowers
/// sparsity-agnostic linalg-on-tensors IR (with sparse tensor encodings)
/// all the way down to the LLVM dialect, optionally emitting GPU code.
/// All knobs (vectorization, runtime library, GPU triple, ...) come from
/// `options`; see SparseCompilerOptions for their meaning.
void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // Generalize named linalg ops so later stages only see linalg.generic.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
  // Combined sparsification + bufferization, forwarding the user options.
  // Note the two trailing flags map onto differently-named parameters.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.sparseTensorConversionOptions(),
      options.createSparseDeallocs, options.enableRuntimeLibrary,
      options.enableBufferInitialization, options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices));
  // In analysis-only test mode the pipeline deliberately stops here.
  if (options.testBufferizationAnalysisOnly)
    return;
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());

  // GPU code generation (enabled by supplying a GPU triple): outline sparse
  // kernels into GPU modules and lower their bodies toward NVVM.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createLowerGpuOpsToNVVMOpsPass());
  }

  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
  //
  // Progressive lowering of the host code: loops -> CF, then per-dialect
  // conversions into the LLVM dialect. The relative order matters; do not
  // reorder these passes without re-running the sparse integration tests.
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  // Repeat convert-vector-to-llvm: earlier conversions can introduce new
  // vector ops that still need lowering.
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation: serialize device modules to CUBIN (only
  // when the build enables the serialization pass) and lower host-side GPU
  // ops to LLVM runtime calls.
  if (gpuCodegen) {
#if MLIR_GPU_TO_CUBIN_PASS_ENABLE
    pm.addNestedPass<gpu::GPUModuleOp>(createGpuSerializeToCubinPass(
        options.gpuTriple, options.gpuChip, options.gpuFeatures));
#endif
    pm.addPass(createGpuToLLVMConversionPass());
  }

  // Clean up any unrealized_conversion_cast ops left over from the
  // piecewise dialect conversions above.
  pm.addPass(createReconcileUnrealizedCastsPass());
}
89 
90 //===----------------------------------------------------------------------===//
91 // Pipeline registration.
92 //===----------------------------------------------------------------------===//
93 
94 void mlir::sparse_tensor::registerSparseTensorPipelines() {
95   PassPipelineRegistration<SparseCompilerOptions>(
96       "sparse-compiler",
97       "The standard pipeline for taking sparsity-agnostic IR using the"
98       " sparse-tensor type, and lowering it to LLVM IR with concrete"
99       " representations and algorithms for sparse tensors.",
100       buildSparseCompiler);
101 }
102