xref: /llvm-project/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp (revision cbc780223374740fcc6771a6d5f53070a7bed2e7)
1 //===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
10 
11 #include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
12 #include "mlir/Conversion/Passes.h"
13 #include "mlir/Dialect/Arith/Transforms/Passes.h"
14 #include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
15 #include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
16 #include "mlir/Dialect/Bufferization/Transforms/Passes.h"
17 #include "mlir/Dialect/Func/IR/FuncOps.h"
18 #include "mlir/Dialect/GPU/IR/GPUDialect.h"
19 #include "mlir/Dialect/GPU/Transforms/Passes.h"
20 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
21 #include "mlir/Dialect/Linalg/Passes.h"
22 #include "mlir/Dialect/MemRef/Transforms/Passes.h"
23 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
24 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
25 #include "mlir/Pass/PassManager.h"
26 #include "mlir/Transforms/Passes.h"
27 
28 //===----------------------------------------------------------------------===//
29 // Pipeline implementation.
30 //===----------------------------------------------------------------------===//
31 
/// Populates `pm` with the "sparsifier" pipeline: it takes sparsity-agnostic
/// IR that uses the sparse-tensor type and progressively lowers it all the
/// way to LLVM dialect (optionally with NVVM/GPU code generation), driven by
/// the flags in `options`. NOTE: pass order below is significant; do not
/// reorder the `addPass` calls.
void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
                                          const SparsifierOptions &options) {
  // Rewrite named linalg ops into generic ops and apply fusion.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizeNamedOpsPass());
  pm.addNestedPass<func::FuncOp>(createLinalgElementwiseOpFusionPass());

  // Sparsification and bufferization mini-pipeline. Most knobs from
  // `options` (vectorization, GPU libgen, runtime library, etc.) are
  // forwarded into this single combined pass.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.createSparseDeallocs,
      options.enableRuntimeLibrary, options.enableBufferInitialization,
      options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices,
      options.enableGPULibgen,
      options.sparsificationOptions().sparseEmitStrategy,
      options.sparsificationOptions().parallelizationStrategy));

  // Bail-early for test setup: when only the bufferization analysis is
  // being exercised, none of the lowering below should run.
  if (options.testBufferizationAnalysisOnly)
    return;

  // Storage specifier lowering and bufferization wrap-up.
  pm.addPass(createStorageSpecifierToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());

  // GPU code generation. GPU codegen is requested by supplying a target
  // triple; the generated GPU modules are lowered to NVVM here, and turned
  // into binaries at the end of the pipeline (see below).
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }

  // Progressively lower to LLVM. Note that the convert-vector-to-llvm
  // pass is repeated on purpose.
  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  // Repeat vector-to-llvm: the math/complex conversions above can introduce
  // new vector ops that still need lowering.
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation: attach the NVVM target (triple/chip/
  // features from `options`), lower host-side GPU ops to LLVM, and serialize
  // GPU modules to the requested binary format.
  if (gpuCodegen) {
    GpuNVVMAttachTargetOptions nvvmTargetOptions;
    nvvmTargetOptions.triple = options.gpuTriple;
    nvvmTargetOptions.chip = options.gpuChip;
    nvvmTargetOptions.features = options.gpuFeatures;
    pm.addPass(createGpuNVVMAttachTarget(nvvmTargetOptions));
    pm.addPass(createGpuToLLVMConversionPass());
    GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
    gpuModuleToBinaryPassOptions.compilationTarget = options.gpuFormat;
    pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
  }

  // Ensure all casts are realized.
  pm.addPass(createReconcileUnrealizedCastsPass());
}
109 
110 //===----------------------------------------------------------------------===//
111 // Pipeline registration.
112 //===----------------------------------------------------------------------===//
113 
114 void mlir::sparse_tensor::registerSparseTensorPipelines() {
115   PassPipelineRegistration<SparsifierOptions>(
116       "sparsifier",
117       "The standard pipeline for taking sparsity-agnostic IR using the"
118       " sparse-tensor type, and lowering it to LLVM IR with concrete"
119       " representations and algorithms for sparse tensors.",
120       buildSparsifier);
121 }
122