//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

/// Populates `pm` with the "sparsifier" pipeline: takes sparsity-agnostic
/// IR that uses the sparse-tensor type, sparsifies and bufferizes it,
/// optionally generates GPU code (when a GPU triple is supplied in
/// `options`), and then progressively lowers the result to the LLVM
/// dialect. All pipeline knobs are read from `options`.
///
/// NOTE: pass ordering here is deliberate and load-bearing; in particular,
/// convert-vector-to-llvm is scheduled three times on purpose (see below).
void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
                                          const SparsifierOptions &options) {
  // Rewrite named linalg ops into generic ops and apply fusion.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizeNamedOpsPass());
  pm.addNestedPass<func::FuncOp>(createLinalgElementwiseOpFusionPass());

  // Sparsification and bufferization mini-pipeline. All sparsification
  // options plus the bufferization setup are forwarded from `options`;
  // the /*...=*/ annotations name the boolean parameters being bound.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.createSparseDeallocs,
      options.enableRuntimeLibrary, options.enableBufferInitialization,
      options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices,
      options.enableGPULibgen,
      options.sparsificationOptions().sparseEmitStrategy,
      options.sparsificationOptions().parallelizationStrategy));

  // Bail-early for test setup: when only the bufferization analysis is
  // requested, none of the lowering below should run.
  if (options.testBufferizationAnalysisOnly)
    return;

  // Storage specifier lowering and bufferization wrap-up.
  pm.addPass(createStorageSpecifierToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());

  // GPU code generation is requested by supplying a GPU target triple.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    // Lower the outlined GPU modules down to NVVM ops.
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }

  // Progressively lower to LLVM. Note that the convert-vector-to-llvm
  // pass is repeated on purpose (later conversions can reintroduce
  // vector ops that must be lowered again).
  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());
  pm.addPass(createArithToLLVMConversionPass());
  pm.addPass(createConvertControlFlowToLLVMPass());

  // Finalize GPU code generation: attach an NVVM target (triple, chip,
  // features) to the GPU modules, lower host-side GPU ops to LLVM, and
  // serialize the GPU modules to the requested binary format.
  if (gpuCodegen) {
    GpuNVVMAttachTargetOptions nvvmTargetOptions;
    nvvmTargetOptions.triple = options.gpuTriple;
    nvvmTargetOptions.chip = options.gpuChip;
    nvvmTargetOptions.features = options.gpuFeatures;
    pm.addPass(createGpuNVVMAttachTarget(nvvmTargetOptions));
    pm.addPass(createGpuToLLVMConversionPass());
    GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
    gpuModuleToBinaryPassOptions.compilationTarget = options.gpuFormat;
    pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
  }

  // Ensure all casts are realized.
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
116 //===----------------------------------------------------------------------===// 117 118 void mlir::sparse_tensor::registerSparseTensorPipelines() { 119 PassPipelineRegistration<SparsifierOptions>( 120 "sparsifier", 121 "The standard pipeline for taking sparsity-agnostic IR using the" 122 " sparse-tensor type, and lowering it to LLVM IR with concrete" 123 " representations and algorithms for sparse tensors.", 124 buildSparsifier); 125 } 126