1 //===- LowerGpuOpsToROCDLOps.cpp - MLIR GPU to ROCDL lowering passes ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements a pass to generate ROCDLIR operations for higher-level 10 // GPU operations. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 15 #include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h" 16 17 #include "mlir/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.h" 18 #include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" 19 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 20 #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" 21 #include "mlir/Conversion/LLVMCommon/LoweringOptions.h" 22 #include "mlir/Conversion/LLVMCommon/Pattern.h" 23 #include "mlir/Conversion/LLVMCommon/TypeConverter.h" 24 #include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" 25 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h" 26 #include "mlir/Dialect/Func/IR/FuncOps.h" 27 #include "mlir/Dialect/GPU/IR/GPUDialect.h" 28 #include "mlir/Dialect/GPU/Transforms/Passes.h" 29 #include "mlir/Dialect/LLVMIR/LLVMDialect.h" 30 #include "mlir/Dialect/LLVMIR/ROCDLDialect.h" 31 #include "mlir/Dialect/Math/IR/Math.h" 32 #include "mlir/Dialect/Vector/IR/VectorOps.h" 33 #include "mlir/IR/BuiltinAttributes.h" 34 #include "mlir/Pass/Pass.h" 35 #include "mlir/Transforms/DialectConversion.h" 36 #include "mlir/Transforms/GreedyPatternRewriteDriver.h" 37 #include "llvm/Support/FormatVariadic.h" 38 39 #include "../GPUCommon/GPUOpsLowering.h" 40 #include "../GPUCommon/IndexIntrinsicsOpLowering.h" 41 #include "../GPUCommon/OpToFuncCallLowering.h" 42 43 namespace mlir { 44 #define GEN_PASS_DEF_CONVERTGPUOPSTOROCDLOPS 45 #include "mlir/Conversion/Passes.h.inc" 46 } // namespace mlir 47 48 using namespace mlir; 49 50 /// Returns true if the given `gpu.func` can be safely called using the bare 51 /// pointer calling convention. 52 static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) { 53 bool canBeBare = true; 54 for (Type type : func.getArgumentTypes()) 55 if (auto memrefTy = type.dyn_cast<BaseMemRefType>()) 56 canBeBare &= LLVMTypeConverter::canConvertToBarePtr(memrefTy); 57 return canBeBare; 58 } 59 60 namespace { 61 62 /// Import the GPU Ops to ROCDL Patterns. 63 #include "GPUToROCDL.cpp.inc" 64 65 // A pass that replaces all occurrences of GPU device operations with their 66 // corresponding ROCDL equivalent. 67 // 68 // This pass only handles device code and is not meant to be run on GPU host 69 // code. 70 struct LowerGpuOpsToROCDLOpsPass 71 : public impl::ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> { 72 LowerGpuOpsToROCDLOpsPass() = default; 73 LowerGpuOpsToROCDLOpsPass(const std::string &chipset, unsigned indexBitwidth, 74 bool useBarePtrCallConv, 75 gpu::amd::Runtime runtime) { 76 if (this->chipset.getNumOccurrences() == 0) 77 this->chipset = chipset; 78 if (this->indexBitwidth.getNumOccurrences() == 0) 79 this->indexBitwidth = indexBitwidth; 80 if (this->useBarePtrCallConv.getNumOccurrences() == 0) 81 this->useBarePtrCallConv = useBarePtrCallConv; 82 if (this->runtime.getNumOccurrences() == 0) 83 this->runtime = runtime; 84 } 85 86 void runOnOperation() override { 87 gpu::GPUModuleOp m = getOperation(); 88 MLIRContext *ctx = m.getContext(); 89 90 // Request C wrapper emission. 91 for (auto func : m.getOps<func::FuncOp>()) { 92 func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(), 93 UnitAttr::get(ctx)); 94 } 95 96 FailureOr<amdgpu::Chipset> maybeChipset = amdgpu::Chipset::parse(chipset); 97 if (failed(maybeChipset)) { 98 emitError(UnknownLoc::get(ctx), "Invalid chipset name: " + chipset); 99 return signalPassFailure(); 100 } 101 102 /// Customize the bitwidth used for the device side index computations. 103 LowerToLLVMOptions options( 104 ctx, DataLayout(cast<DataLayoutOpInterface>(m.getOperation()))); 105 if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout) 106 options.overrideIndexBitwidth(indexBitwidth); 107 108 if (useBarePtrCallConv) { 109 options.useBarePtrCallConv = true; 110 WalkResult canUseBarePointers = 111 m.walk([](gpu::GPUFuncOp func) -> WalkResult { 112 if (canBeCalledWithBarePointers(func)) 113 return WalkResult::advance(); 114 return WalkResult::interrupt(); 115 }); 116 if (canUseBarePointers.wasInterrupted()) { 117 emitError(UnknownLoc::get(ctx), 118 "bare pointer calling convention requires all memrefs to " 119 "have static shape and use the identity map"); 120 return signalPassFailure(); 121 } 122 } 123 124 LLVMTypeConverter converter(ctx, options); 125 126 RewritePatternSet patterns(ctx); 127 RewritePatternSet llvmPatterns(ctx); 128 129 populateGpuRewritePatterns(patterns); 130 (void)applyPatternsAndFoldGreedily(m, std::move(patterns)); 131 132 mlir::arith::populateArithToLLVMConversionPatterns(converter, llvmPatterns); 133 populateAMDGPUToROCDLConversionPatterns(converter, llvmPatterns, 134 *maybeChipset); 135 populateVectorToLLVMConversionPatterns(converter, llvmPatterns); 136 cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns); 137 populateFuncToLLVMConversionPatterns(converter, llvmPatterns); 138 populateMemRefToLLVMConversionPatterns(converter, llvmPatterns); 139 populateGpuToROCDLConversionPatterns(converter, llvmPatterns, runtime); 140 LLVMConversionTarget target(getContext()); 141 configureGpuToROCDLConversionLegality(target); 142 if (failed(applyPartialConversion(m, target, std::move(llvmPatterns)))) 143 signalPassFailure(); 144 145 // Manually rewrite known block size attributes so the LLVMIR translation 146 // infrastructure can pick them up. 147 m.walk([ctx](LLVM::LLVMFuncOp op) { 148 if (auto blockSizes = 149 op->removeAttr(gpu::GPUFuncOp::getKnownBlockSizeAttrName()) 150 .dyn_cast_or_null<DenseI32ArrayAttr>()) { 151 op->setAttr(ROCDL::ROCDLDialect::getReqdWorkGroupSizeAttrName(), 152 blockSizes); 153 // Also set up the rocdl.flat_work_group_size attribute to prevent 154 // conflicting metadata. 155 uint32_t flatSize = 1; 156 for (uint32_t size : blockSizes.asArrayRef()) { 157 flatSize *= size; 158 } 159 StringAttr flatSizeAttr = 160 StringAttr::get(ctx, Twine(flatSize) + "," + Twine(flatSize)); 161 op->setAttr(ROCDL::ROCDLDialect::getFlatWorkGroupSizeAttrName(), 162 flatSizeAttr); 163 } 164 }); 165 } 166 }; 167 168 } // namespace 169 170 void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) { 171 target.addIllegalOp<func::FuncOp>(); 172 target.addLegalDialect<::mlir::LLVM::LLVMDialect>(); 173 target.addLegalDialect<ROCDL::ROCDLDialect>(); 174 target.addIllegalDialect<gpu::GPUDialect>(); 175 target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp, 176 LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op, 177 LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp, LLVM::SqrtOp>(); 178 179 // TODO: Remove once we support replacing non-root ops. 180 target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>(); 181 } 182 183 template <typename OpTy> 184 static void populateOpPatterns(LLVMTypeConverter &converter, 185 RewritePatternSet &patterns, StringRef f32Func, 186 StringRef f64Func) { 187 patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter); 188 patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func); 189 } 190 191 void mlir::populateGpuToROCDLConversionPatterns( 192 LLVMTypeConverter &converter, RewritePatternSet &patterns, 193 mlir::gpu::amd::Runtime runtime) { 194 using mlir::gpu::amd::Runtime; 195 196 populateWithGenerated(patterns); 197 patterns 198 .add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp, 199 ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>>( 200 converter, gpu::GPUFuncOp::getKnownBlockSizeAttrName()); 201 patterns.add<GPUIndexIntrinsicOpLowering< 202 gpu::BlockIdOp, ROCDL::BlockIdXOp, ROCDL::BlockIdYOp, ROCDL::BlockIdZOp>>( 203 converter, gpu::GPUFuncOp::getKnownGridSizeAttrName()); 204 patterns 205 .add<GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp, 206 ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>, 207 GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp, 208 ROCDL::GridDimYOp, ROCDL::GridDimZOp>, 209 GPUReturnOpLowering>(converter); 210 patterns.add<GPUFuncOpLowering>( 211 converter, /*allocaAddrSpace=*/5, 212 StringAttr::get(&converter.getContext(), 213 ROCDL::ROCDLDialect::getKernelFuncAttrName())); 214 if (Runtime::HIP == runtime) { 215 patterns.add<GPUPrintfOpToHIPLowering>(converter); 216 } else if (Runtime::OpenCL == runtime) { 217 // Use address space = 4 to match the OpenCL definition of printf() 218 patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/4); 219 } 220 221 populateOpPatterns<math::AbsFOp>(converter, patterns, "__ocml_fabs_f32", 222 "__ocml_fabs_f64"); 223 populateOpPatterns<math::AtanOp>(converter, patterns, "__ocml_atan_f32", 224 "__ocml_atan_f64"); 225 populateOpPatterns<math::Atan2Op>(converter, patterns, "__ocml_atan2_f32", 226 "__ocml_atan2_f64"); 227 populateOpPatterns<math::CeilOp>(converter, patterns, "__ocml_ceil_f32", 228 "__ocml_ceil_f64"); 229 populateOpPatterns<math::CosOp>(converter, patterns, "__ocml_cos_f32", 230 "__ocml_cos_f64"); 231 populateOpPatterns<math::ExpOp>(converter, patterns, "__ocml_exp_f32", 232 "__ocml_exp_f64"); 233 populateOpPatterns<math::Exp2Op>(converter, patterns, "__ocml_exp2_f32", 234 "__ocml_exp2_f64"); 235 populateOpPatterns<math::ExpM1Op>(converter, patterns, "__ocml_expm1_f32", 236 "__ocml_expm1_f64"); 237 populateOpPatterns<math::FloorOp>(converter, patterns, "__ocml_floor_f32", 238 "__ocml_floor_f64"); 239 populateOpPatterns<math::LogOp>(converter, patterns, "__ocml_log_f32", 240 "__ocml_log_f64"); 241 populateOpPatterns<math::Log10Op>(converter, patterns, "__ocml_log10_f32", 242 "__ocml_log10_f64"); 243 populateOpPatterns<math::Log1pOp>(converter, patterns, "__ocml_log1p_f32", 244 "__ocml_log1p_f64"); 245 populateOpPatterns<math::Log2Op>(converter, patterns, "__ocml_log2_f32", 246 "__ocml_log2_f64"); 247 populateOpPatterns<math::PowFOp>(converter, patterns, "__ocml_pow_f32", 248 "__ocml_pow_f64"); 249 populateOpPatterns<math::RsqrtOp>(converter, patterns, "__ocml_rsqrt_f32", 250 "__ocml_rsqrt_f64"); 251 populateOpPatterns<math::SinOp>(converter, patterns, "__ocml_sin_f32", 252 "__ocml_sin_f64"); 253 populateOpPatterns<math::SqrtOp>(converter, patterns, "__ocml_sqrt_f32", 254 "__ocml_sqrt_f64"); 255 populateOpPatterns<math::TanhOp>(converter, patterns, "__ocml_tanh_f32", 256 "__ocml_tanh_f64"); 257 } 258 259 std::unique_ptr<OperationPass<gpu::GPUModuleOp>> 260 mlir::createLowerGpuOpsToROCDLOpsPass(const std::string &chipset, 261 unsigned indexBitwidth, 262 bool useBarePtrCallConv, 263 gpu::amd::Runtime runtime) { 264 return std::make_unique<LowerGpuOpsToROCDLOpsPass>( 265 chipset, indexBitwidth, useBarePtrCallConv, runtime); 266 } 267