//===- LowerGpuOpsToROCDLOps.cpp - MLIR GPU to ROCDL lowering passes ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to generate ROCDL IR operations for higher-level
// GPU operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h"

#include "mlir/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOROCDLOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

using namespace mlir;

/// Returns true if the given `gpu.func` can be safely called using the bare
/// pointer calling convention.
static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
  bool canBeBare = true;
  for (Type type : func.getArgumentTypes())
    if (auto memrefTy = dyn_cast<BaseMemRefType>(type))
      canBeBare &= LLVMTypeConverter::canConvertToBarePtr(memrefTy);
  return canBeBare;
}

namespace {

/// Import the GPU Ops to ROCDL Patterns.
#include "GPUToROCDL.cpp.inc"

// A pass that replaces all occurrences of GPU device operations with their
// corresponding ROCDL equivalent.
//
// This pass only handles device code and is not meant to be run on GPU host
// code.
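//
// For example, an index query such as
//   %tid = gpu.thread_id x
// is rewritten to the matching ROCDL intrinsic, roughly:
//   %tid = rocdl.workitem.id.x : i32
// followed by a sign-extension or truncation to the configured index
// bitwidth.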
struct LowerGpuOpsToROCDLOpsPass
    : public impl::ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> {
  LowerGpuOpsToROCDLOpsPass() = default;
  LowerGpuOpsToROCDLOpsPass(const std::string &chipset, unsigned indexBitwidth,
                            bool useBarePtrCallConv,
                            gpu::amd::Runtime runtime) {
    if (this->chipset.getNumOccurrences() == 0)
      this->chipset = chipset;
    if (this->indexBitwidth.getNumOccurrences() == 0)
      this->indexBitwidth = indexBitwidth;
    if (this->useBarePtrCallConv.getNumOccurrences() == 0)
      this->useBarePtrCallConv = useBarePtrCallConv;
    if (this->runtime.getNumOccurrences() == 0)
      this->runtime = runtime;
  }

  void runOnOperation() override {
    gpu::GPUModuleOp m = getOperation();
    MLIRContext *ctx = m.getContext();

    // Request C wrapper emission.
    for (auto func : m.getOps<func::FuncOp>()) {
      func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
                    UnitAttr::get(ctx));
    }

    FailureOr<amdgpu::Chipset> maybeChipset = amdgpu::Chipset::parse(chipset);
    if (failed(maybeChipset)) {
      emitError(UnknownLoc::get(ctx), "Invalid chipset name: " + chipset);
      return signalPassFailure();
    }

    // Customize the bitwidth used for the device-side index computations.
    LowerToLLVMOptions options(
        ctx, DataLayout(cast<DataLayoutOpInterface>(m.getOperation())));
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);

    if (useBarePtrCallConv) {
      options.useBarePtrCallConv = true;
      WalkResult canUseBarePointers =
          m.walk([](gpu::GPUFuncOp func) -> WalkResult {
            if (canBeCalledWithBarePointers(func))
              return WalkResult::advance();
            return WalkResult::interrupt();
          });
      if (canUseBarePointers.wasInterrupted()) {
        emitError(UnknownLoc::get(ctx),
                  "bare pointer calling convention requires all memrefs to "
                  "have static shape and use the identity map");
        return signalPassFailure();
      }
    }

    // Apply in-dialect lowering first. These greedy rewrites replace ops that
    // must be lowered in several steps, which a single conversion pass cannot
    // express.
    {
      RewritePatternSet patterns(ctx);
      populateGpuRewritePatterns(patterns);
      (void)applyPatternsAndFoldGreedily(m, std::move(patterns));
    }

    // Apply memory space lowering. The target uses 1 for global memory, 3 for
    // workgroup memory, and 5 for private memory.
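    // For example, a value of type
    //   memref<4xf32, #gpu.address_space<workgroup>>
    // becomes memref<4xf32, 3> after this conversion.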
    {
      RewritePatternSet patterns(ctx);
      TypeConverter typeConverter;
      typeConverter.addConversion([](Type t) { return t; });
      gpu::populateMemorySpaceAttributeTypeConversions(
          typeConverter, [](gpu::AddressSpace space) {
            switch (space) {
            case gpu::AddressSpace::Global:
              return 1;
            case gpu::AddressSpace::Workgroup:
              return 3;
            case gpu::AddressSpace::Private:
              return 5;
            }
            llvm_unreachable("unknown address space enum value");
            return 0;
          });
      ConversionTarget target(getContext());
      gpu::populateLowerMemorySpaceOpLegality(target);
      gpu::populateMemorySpaceLoweringPatterns(typeConverter, patterns);
      if (failed(applyFullConversion(m, target, std::move(patterns))))
        return signalPassFailure();
    }

    LLVMTypeConverter converter(ctx, options);
    RewritePatternSet llvmPatterns(ctx);

    mlir::arith::populateArithToLLVMConversionPatterns(converter, llvmPatterns);
    populateAMDGPUToROCDLConversionPatterns(converter, llvmPatterns,
                                            *maybeChipset);
    populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
    cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns);
    populateFuncToLLVMConversionPatterns(converter, llvmPatterns);
    populateMemRefToLLVMConversionPatterns(converter, llvmPatterns);
    populateGpuToROCDLConversionPatterns(converter, llvmPatterns, runtime);
    LLVMConversionTarget target(getContext());
    configureGpuToROCDLConversionLegality(target);
    if (failed(applyPartialConversion(m, target, std::move(llvmPatterns))))
      signalPassFailure();

    // Manually rewrite known block size attributes so the LLVMIR translation
    // infrastructure can pick them up.
    m.walk([ctx](LLVM::LLVMFuncOp op) {
      if (auto blockSizes = dyn_cast_or_null<DenseI32ArrayAttr>(
              op->removeAttr(gpu::GPUFuncOp::getKnownBlockSizeAttrName()))) {
        op->setAttr(ROCDL::ROCDLDialect::getReqdWorkGroupSizeAttrName(),
                    blockSizes);
        // Also set up the rocdl.flat_work_group_size attribute to prevent
        // conflicting metadata.
        uint32_t flatSize = 1;
        for (uint32_t size : blockSizes.asArrayRef())
          flatSize *= size;
        StringAttr flatSizeAttr =
            StringAttr::get(ctx, Twine(flatSize) + "," + Twine(flatSize));
        op->setAttr(ROCDL::ROCDLDialect::getFlatWorkGroupSizeAttrName(),
                    flatSizeAttr);
      }
    });
  }
};

} // namespace

void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
  target.addIllegalOp<func::FuncOp>();
  target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
  target.addLegalDialect<ROCDL::ROCDLDialect>();
  target.addIllegalDialect<gpu::GPUDialect>();
  target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp,
                      LLVM::Log10Op, LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp,
                      LLVM::SqrtOp>();
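  // These wrappers around LLVM math intrinsics are marked illegal so that the
  // corresponding math ops are lowered to OCML device-library calls (the
  // __ocml_* functions registered in populateGpuToROCDLConversionPatterns()
  // below) rather than to LLVM intrinsics.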

  // TODO: Remove once we support replacing non-root ops.
  target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}

template <typename OpTy>
static void populateOpPatterns(LLVMTypeConverter &converter,
                               RewritePatternSet &patterns, StringRef f32Func,
                               StringRef f64Func) {
  patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter);
  patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func);
}

void mlir::populateGpuToROCDLConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    mlir::gpu::amd::Runtime runtime) {
  using mlir::gpu::amd::Runtime;

  populateWithGenerated(patterns);
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
                                       ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>>(
          converter, gpu::GPUFuncOp::getKnownBlockSizeAttrName());
  patterns.add<GPUIndexIntrinsicOpLowering<
      gpu::BlockIdOp, ROCDL::BlockIdXOp, ROCDL::BlockIdYOp, ROCDL::BlockIdZOp>>(
      converter, gpu::GPUFuncOp::getKnownGridSizeAttrName());
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp,
                                       ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>,
           GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp,
                                       ROCDL::GridDimYOp, ROCDL::GridDimZOp>,
           GPUReturnOpLowering>(converter);
  patterns.add<GPUFuncOpLowering>(
      converter,
      /*allocaAddrSpace=*/ROCDL::ROCDLDialect::kPrivateMemoryAddressSpace,
      /*workgroupAddrSpace=*/ROCDL::ROCDLDialect::kSharedMemoryAddressSpace,
      StringAttr::get(&converter.getContext(),
                      ROCDL::ROCDLDialect::getKernelFuncAttrName()));
  if (Runtime::HIP == runtime) {
    patterns.add<GPUPrintfOpToHIPLowering>(converter);
  } else if (Runtime::OpenCL == runtime) {
    // Use address space = 4 to match the OpenCL definition of printf().
    patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/4);
  }

  populateOpPatterns<math::AbsFOp>(converter, patterns, "__ocml_fabs_f32",
                                   "__ocml_fabs_f64");
  populateOpPatterns<math::AtanOp>(converter, patterns, "__ocml_atan_f32",
                                   "__ocml_atan_f64");
  populateOpPatterns<math::Atan2Op>(converter, patterns, "__ocml_atan2_f32",
                                    "__ocml_atan2_f64");
  populateOpPatterns<math::CbrtOp>(converter, patterns, "__ocml_cbrt_f32",
                                   "__ocml_cbrt_f64");
  populateOpPatterns<math::CeilOp>(converter, patterns, "__ocml_ceil_f32",
                                   "__ocml_ceil_f64");
  populateOpPatterns<math::CosOp>(converter, patterns, "__ocml_cos_f32",
                                  "__ocml_cos_f64");
  populateOpPatterns<math::ExpOp>(converter, patterns, "__ocml_exp_f32",
                                  "__ocml_exp_f64");
  populateOpPatterns<math::Exp2Op>(converter, patterns, "__ocml_exp2_f32",
                                   "__ocml_exp2_f64");
  populateOpPatterns<math::ExpM1Op>(converter, patterns, "__ocml_expm1_f32",
                                    "__ocml_expm1_f64");
  populateOpPatterns<math::FloorOp>(converter, patterns, "__ocml_floor_f32",
                                    "__ocml_floor_f64");
  populateOpPatterns<math::LogOp>(converter, patterns, "__ocml_log_f32",
                                  "__ocml_log_f64");
  populateOpPatterns<math::Log10Op>(converter, patterns, "__ocml_log10_f32",
                                    "__ocml_log10_f64");
  populateOpPatterns<math::Log1pOp>(converter, patterns, "__ocml_log1p_f32",
                                    "__ocml_log1p_f64");
  populateOpPatterns<math::Log2Op>(converter, patterns, "__ocml_log2_f32",
                                   "__ocml_log2_f64");
  populateOpPatterns<math::PowFOp>(converter, patterns, "__ocml_pow_f32",
                                   "__ocml_pow_f64");
  populateOpPatterns<math::RsqrtOp>(converter, patterns, "__ocml_rsqrt_f32",
                                    "__ocml_rsqrt_f64");
  populateOpPatterns<math::SinOp>(converter, patterns, "__ocml_sin_f32",
                                  "__ocml_sin_f64");
  populateOpPatterns<math::SqrtOp>(converter, patterns, "__ocml_sqrt_f32",
                                   "__ocml_sqrt_f64");
  populateOpPatterns<math::TanhOp>(converter, patterns, "__ocml_tanh_f32",
                                   "__ocml_tanh_f64");
  populateOpPatterns<math::TanOp>(converter, patterns, "__ocml_tan_f32",
                                  "__ocml_tan_f64");
}

std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToROCDLOpsPass(const std::string &chipset,
                                      unsigned indexBitwidth,
                                      bool useBarePtrCallConv,
                                      gpu::amd::Runtime runtime) {
  return std::make_unique<LowerGpuOpsToROCDLOpsPass>(
      chipset, indexBitwidth, useBarePtrCallConv, runtime);
}
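
// A minimal usage sketch (the pass is also registered with mlir-opt as
// `convert-gpu-to-rocdl`); the chipset string here is only an example:
//   pm.addNestedPass<gpu::GPUModuleOp>(createLowerGpuOpsToROCDLOpsPass(
//       "gfx906", /*indexBitwidth=*/kDeriveIndexBitwidthFromDataLayout,
//       /*useBarePtrCallConv=*/false, gpu::amd::Runtime::HIP));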