//===- LowerGpuOpsToROCDLOps.cpp - MLIR GPU to ROCDL lowering passes ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to generate ROCDLIR operations for higher-level
// GPU operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h"

#include "mlir/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
#include "../PassDetail.h"

using namespace mlir;

/// Returns true if the given `gpu.func` can be safely called using the bare
/// pointer calling convention. This is the case only if every memref argument
/// can itself be converted to a bare pointer (see
/// `LLVMTypeConverter::canConvertToBarePtr`); non-memref arguments are
/// unaffected by the calling convention and are ignored here.
static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
  bool canBeBare = true;
  for (Type type : func.getArgumentTypes())
    if (auto memrefTy = type.dyn_cast<BaseMemRefType>())
      // A single non-convertible memref disqualifies the whole function.
      canBeBare &= LLVMTypeConverter::canConvertToBarePtr(memrefTy);
  return canBeBare;
}

namespace {

/// Import the GPU Ops to ROCDL Patterns.
#include "GPUToROCDL.cpp.inc"

// A pass that replaces all occurrences of GPU device operations with their
// corresponding ROCDL equivalent.
//
// This pass only handles device code and is not meant to be run on GPU host
// code.
struct LowerGpuOpsToROCDLOpsPass
    : public ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> {
  LowerGpuOpsToROCDLOpsPass() = default;
  // Constructor arguments seed the pass options, but an option that was
  // explicitly set on the command line (getNumOccurrences() != 0) always
  // takes precedence over the constructor value.
  LowerGpuOpsToROCDLOpsPass(const std::string &chipset, unsigned indexBitwidth,
                            bool useBarePtrCallConv,
                            gpu::amd::Runtime runtime) {
    if (this->chipset.getNumOccurrences() == 0)
      this->chipset = chipset;
    if (this->indexBitwidth.getNumOccurrences() == 0)
      this->indexBitwidth = indexBitwidth;
    if (this->useBarePtrCallConv.getNumOccurrences() == 0)
      this->useBarePtrCallConv = useBarePtrCallConv;
    if (this->runtime.getNumOccurrences() == 0)
      this->runtime = runtime;
  }

  void runOnOperation() override {
    gpu::GPUModuleOp m = getOperation();
    MLIRContext *ctx = m.getContext();

    // Request C wrapper emission for every func.func in the GPU module by
    // tagging it with the LLVM dialect's emit-C-wrapper attribute.
    for (auto func : m.getOps<func::FuncOp>()) {
      func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
                    UnitAttr::get(ctx));
    }

    // Validate the chipset option early so we fail with a clear diagnostic
    // instead of producing malformed ROCDL IR.
    FailureOr<amdgpu::Chipset> maybeChipset = amdgpu::Chipset::parse(chipset);
    if (failed(maybeChipset)) {
      emitError(UnknownLoc::get(ctx), "Invalid chipset name: " + chipset);
      return signalPassFailure();
    }

    /// Customize the bitwidth used for the device side index computations.
    // Lowering options are derived from the module's data layout; the index
    // bitwidth is overridden only when the pass option requests a specific
    // width rather than kDeriveIndexBitwidthFromDataLayout.
    LowerToLLVMOptions options(
        ctx, DataLayout(cast<DataLayoutOpInterface>(m.getOperation())));
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);

    if (useBarePtrCallConv) {
      options.useBarePtrCallConv = true;
      // The bare-pointer convention can only be enabled module-wide: walk all
      // gpu.func ops and interrupt on the first one that does not qualify.
      WalkResult canUseBarePointers =
          m.walk([](gpu::GPUFuncOp func) -> WalkResult {
            if (canBeCalledWithBarePointers(func))
              return WalkResult::advance();
            return WalkResult::interrupt();
          });
      if (canUseBarePointers.wasInterrupted()) {
        emitError(UnknownLoc::get(ctx),
                  "bare pointer calling convention requires all memrefs to "
                  "have static shape and use the identity map");
        return signalPassFailure();
      }
    }

    LLVMTypeConverter converter(ctx, options);

    RewritePatternSet patterns(ctx);
    RewritePatternSet llvmPatterns(ctx);

    // Run GPU-dialect-level rewrites greedily first; these normalize GPU ops
    // before the one-shot dialect conversion below. The result is
    // intentionally ignored: failure to converge is not fatal here.
    populateGpuRewritePatterns(patterns);
    (void)applyPatternsAndFoldGreedily(m, std::move(patterns));

    // Gather all lowering patterns (arith/amdgpu/vector/cf/func/memref/gpu)
    // into a single set and apply them in one partial conversion.
    mlir::arith::populateArithmeticToLLVMConversionPatterns(converter,
                                                            llvmPatterns);
    populateAMDGPUToROCDLConversionPatterns(converter, llvmPatterns,
                                            *maybeChipset);
    populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
    cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns);
    populateFuncToLLVMConversionPatterns(converter, llvmPatterns);
    populateMemRefToLLVMConversionPatterns(converter, llvmPatterns);
    populateGpuToROCDLConversionPatterns(converter, llvmPatterns, runtime);
    LLVMConversionTarget target(getContext());
    configureGpuToROCDLConversionLegality(target);
    // Partial conversion: ops not marked illegal may legally remain.
    if (failed(applyPartialConversion(m, target, std::move(llvmPatterns))))
      signalPassFailure();
  }
};

} // namespace

void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
  // func.func must be fully lowered; the LLVM and ROCDL dialects are the
  // legal lowering targets.
  target.addIllegalOp<func::FuncOp>();
  target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
  target.addLegalDialect<ROCDL::ROCDLDialect>();
  target.addIllegalDialect<gpu::GPUDialect>();
  // These LLVM math intrinsics are marked illegal so the OpToFuncCallLowering
  // patterns below rewrite them into __ocml_* library calls instead.
  target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op,
                      LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp, LLVM::SqrtOp>();

  // TODO: Remove once we support replacing non-root ops.
  target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}

void mlir::populateGpuToROCDLConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    mlir::gpu::amd::Runtime runtime) {
  using mlir::gpu::amd::Runtime;

  // Patterns generated from GPUToROCDL.td (via GPUToROCDL.cpp.inc).
  populateWithGenerated(patterns);
  // Index intrinsics: each GPU index op maps to the per-dimension (X/Y/Z)
  // ROCDL intrinsic family.
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
                                       ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>,
           GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp,
                                       ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>,
           GPUIndexIntrinsicOpLowering<gpu::BlockIdOp, ROCDL::BlockIdXOp,
                                       ROCDL::BlockIdYOp, ROCDL::BlockIdZOp>,
           GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp,
                                       ROCDL::GridDimYOp, ROCDL::GridDimZOp>,
           GPUReturnOpLowering>(converter);
  // gpu.func lowering: allocas go to address space 5 (the AMDGPU private
  // address space) and kernels are tagged with the ROCDL kernel attribute.
  patterns.add<GPUFuncOpLowering>(
      converter, /*allocaAddrSpace=*/5,
      StringAttr::get(&converter.getContext(),
                      ROCDL::ROCDLDialect::getKernelFuncAttrName()));
  // gpu.printf lowering is runtime-specific.
  if (Runtime::HIP == runtime) {
    patterns.add<GPUPrintfOpToHIPLowering>(converter);
  } else if (Runtime::OpenCL == runtime) {
    // Use address space = 4 to match the OpenCL definition of printf()
    patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/4);
  }

  // Math ops lower to calls into the OCML device math library; each pattern
  // carries the f32 and f64 entry point names.
  patterns.add<OpToFuncCallLowering<math::AbsFOp>>(converter, "__ocml_fabs_f32",
                                                   "__ocml_fabs_f64");
  patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__ocml_atan_f32",
                                                   "__ocml_atan_f64");
  patterns.add<OpToFuncCallLowering<math::Atan2Op>>(
      converter, "__ocml_atan2_f32", "__ocml_atan2_f64");
  patterns.add<OpToFuncCallLowering<math::CeilOp>>(converter, "__ocml_ceil_f32",
                                                   "__ocml_ceil_f64");
  patterns.add<OpToFuncCallLowering<math::CosOp>>(converter, "__ocml_cos_f32",
                                                  "__ocml_cos_f64");
  patterns.add<OpToFuncCallLowering<math::ExpOp>>(converter, "__ocml_exp_f32",
                                                  "__ocml_exp_f64");
  patterns.add<OpToFuncCallLowering<math::Exp2Op>>(converter, "__ocml_exp2_f32",
                                                   "__ocml_exp2_f64");
  patterns.add<OpToFuncCallLowering<math::ExpM1Op>>(
      converter, "__ocml_expm1_f32", "__ocml_expm1_f64");
  patterns.add<OpToFuncCallLowering<math::FloorOp>>(
      converter, "__ocml_floor_f32", "__ocml_floor_f64");
  patterns.add<OpToFuncCallLowering<math::LogOp>>(converter, "__ocml_log_f32",
                                                  "__ocml_log_f64");
  patterns.add<OpToFuncCallLowering<math::Log10Op>>(
      converter, "__ocml_log10_f32", "__ocml_log10_f64");
  patterns.add<OpToFuncCallLowering<math::Log1pOp>>(
      converter, "__ocml_log1p_f32", "__ocml_log1p_f64");
  patterns.add<OpToFuncCallLowering<math::Log2Op>>(converter, "__ocml_log2_f32",
                                                   "__ocml_log2_f64");
  patterns.add<OpToFuncCallLowering<math::PowFOp>>(converter, "__ocml_pow_f32",
                                                   "__ocml_pow_f64");
  patterns.add<OpToFuncCallLowering<math::RsqrtOp>>(
      converter, "__ocml_rsqrt_f32", "__ocml_rsqrt_f64");
  patterns.add<OpToFuncCallLowering<math::SinOp>>(converter, "__ocml_sin_f32",
                                                  "__ocml_sin_f64");
  patterns.add<OpToFuncCallLowering<math::SqrtOp>>(converter, "__ocml_sqrt_f32",
                                                   "__ocml_sqrt_f64");
  patterns.add<OpToFuncCallLowering<math::TanhOp>>(converter, "__ocml_tanh_f32",
                                                   "__ocml_tanh_f64");
}

/// Factory for the GPU-to-ROCDL lowering pass; parameters are forwarded to
/// the pass constructor (command-line options still take precedence there).
std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToROCDLOpsPass(const std::string &chipset,
                                      unsigned indexBitwidth,
                                      bool useBarePtrCallConv,
                                      gpu::amd::Runtime runtime) {
  return std::make_unique<LowerGpuOpsToROCDLOpsPass>(
      chipset, indexBitwidth, useBarePtrCallConv, runtime);
}