//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // Rewrite named linalg ops into generic ops.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());

  // Sparsification and bufferization mini-pipeline.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.createSparseDeallocs,
      options.enableRuntimeLibrary, options.enableBufferInitialization,
      options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices,
      options.enableGPULibgen));

  // Bail-early for test setup.
  if (options.testBufferizationAnalysisOnly)
    return;

  // Storage specifier lowering and bufferization wrap-up.
  pm.addPass(createStorageSpecifierToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }

  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
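  // Progressively lower the remaining high-level constructs (linalg, vector,
  // scf, memref, affine, arith, math, complex, func) down to the LLVM
  // dialect, using libm calls for math/complex ops without a direct lowering.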
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());

  // Repeat convert-vector-to-llvm.
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));

  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
    GpuNVVMAttachTargetOptions nvvmTargetOptions;
    nvvmTargetOptions.triple = options.gpuTriple;
    nvvmTargetOptions.chip = options.gpuChip;
    nvvmTargetOptions.features = options.gpuFeatures;
    pm.addPass(createGpuNVVMAttachTarget(nvvmTargetOptions));
    pm.addPass(createGpuToLLVMConversionPass());
    GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
    gpuModuleToBinaryPassOptions.compilationTarget = options.gpuFormat;
    pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
  }

  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}
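
// Example invocation: once this registration is linked into a tool such as
// mlir-opt, the pipeline can be run by name. This is a sketch; the exact
// option spellings are defined by SparseCompilerOptions in Passes.h, and
// "enable-runtime-library" is assumed here for illustration:
//
//   mlir-opt input.mlir --sparse-compiler="enable-runtime-library=true"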