//===- LowerGpuOpsToROCDLOps.cpp - MLIR GPU to ROCDL lowering passes ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to generate ROCDLIR operations for higher-level
// GPU operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h"

#include "mlir/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOROCDLOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir
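
// The tablegen-generated ConvertGpuOpsToROCDLOpsBase included above declares
// the pass options used below: chipset, indexBitwidth, useBarePtrCallConv,
// and runtime.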

using namespace mlir;

/// Returns true if the given `gpu.func` can be safely called using the bare
/// pointer calling convention.
static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
  bool canBeBare = true;
  for (Type type : func.getArgumentTypes())
    if (auto memrefTy = dyn_cast<BaseMemRefType>(type))
      canBeBare &= LLVMTypeConverter::canConvertToBarePtr(memrefTy);
  return canBeBare;
}
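
// For example, an argument of type `memref<16x16xf32>` (static shape,
// identity layout) can be passed as a bare pointer, whereas a dynamically
// shaped `memref<?xf32>` cannot, since its sizes must travel in a descriptor.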

namespace {
struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
  using ConvertOpToLLVMPattern<gpu::LaneIdOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::LaneIdOp op, gpu::LaneIdOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    MLIRContext *context = rewriter.getContext();
    // Convert to:
    //   %mlo = call @llvm.amdgcn.mbcnt.lo(-1, 0)
    //   %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo)
    // With an all-ones mask, mbcnt.lo/mbcnt.hi count the lanes below the
    // current one, which yields the lane id within the wavefront.

    Type intTy = IntegerType::get(context, 32);
    Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
    Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
    Value mbcntLo =
        rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero});
    Value laneId = rewriter.create<ROCDL::MbcntHiOp>(
        loc, intTy, ValueRange{minus1, mbcntLo});
    // Truncate or extend the result depending on the index bitwidth specified
    // by the LLVMTypeConverter options.
    const unsigned indexBitwidth = getTypeConverter()->getIndexTypeBitwidth();
    if (indexBitwidth > 32) {
      laneId = rewriter.create<LLVM::SExtOp>(
          loc, IntegerType::get(context, indexBitwidth), laneId);
    } else if (indexBitwidth < 32) {
      laneId = rewriter.create<LLVM::TruncOp>(
          loc, IntegerType::get(context, indexBitwidth), laneId);
    }
    rewriter.replaceOp(op, {laneId});
    return success();
  }
};
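
// For the default 64-bit index type this pattern produces, roughly (syntax
// abbreviated):
//   %lo    = mbcnt.lo(-1, 0)     // lanes below the current one, [0..31]
//   %lid32 = mbcnt.hi(-1, %lo)   // plus lanes [32..63]
//   %lid   = sext %lid32 to i64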

/// Import the GPU Ops to ROCDL Patterns.
#include "GPUToROCDL.cpp.inc"

// A pass that replaces all occurrences of GPU device operations with their
// corresponding ROCDL equivalents.
//
// This pass only handles device code and is not meant to be run on GPU host
// code.
struct LowerGpuOpsToROCDLOpsPass
    : public impl::ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> {
  LowerGpuOpsToROCDLOpsPass() = default;
  LowerGpuOpsToROCDLOpsPass(const std::string &chipset, unsigned indexBitwidth,
                            bool useBarePtrCallConv,
                            gpu::amd::Runtime runtime) {
    if (this->chipset.getNumOccurrences() == 0)
      this->chipset = chipset;
    if (this->indexBitwidth.getNumOccurrences() == 0)
      this->indexBitwidth = indexBitwidth;
    if (this->useBarePtrCallConv.getNumOccurrences() == 0)
      this->useBarePtrCallConv = useBarePtrCallConv;
    if (this->runtime.getNumOccurrences() == 0)
      this->runtime = runtime;
  }
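
  // In the constructor above, an argument only takes effect if the matching
  // command-line option was not given: getNumOccurrences() == 0 means the
  // option still holds its default, so explicit flags win over constructor
  // arguments.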

  void runOnOperation() override {
    gpu::GPUModuleOp m = getOperation();
    MLIRContext *ctx = m.getContext();

    // Request C wrapper emission.
    for (auto func : m.getOps<func::FuncOp>()) {
      func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
                    UnitAttr::get(ctx));
    }

    FailureOr<amdgpu::Chipset> maybeChipset = amdgpu::Chipset::parse(chipset);
    if (failed(maybeChipset)) {
      emitError(UnknownLoc::get(ctx), "Invalid chipset name: " + chipset);
      return signalPassFailure();
    }

    // Customize the bitwidth used for the device-side index computations.
    LowerToLLVMOptions options(
        ctx, DataLayout(cast<DataLayoutOpInterface>(m.getOperation())));
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);
    options.useOpaquePointers = useOpaquePointers;

    if (useBarePtrCallConv) {
      options.useBarePtrCallConv = true;
      WalkResult canUseBarePointers =
          m.walk([](gpu::GPUFuncOp func) -> WalkResult {
            if (canBeCalledWithBarePointers(func))
              return WalkResult::advance();
            return WalkResult::interrupt();
          });
      if (canUseBarePointers.wasInterrupted()) {
        emitError(UnknownLoc::get(ctx),
                  "bare pointer calling convention requires all memrefs to "
                  "have static shape and use the identity map");
        return signalPassFailure();
      }
    }

    // Apply in-dialect lowering first. These rewrites replace ops that must
    // be lowered in several steps, which a single conversion pass cannot do.
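    // For example, gpu.all_reduce is expanded into simpler GPU and arith ops
    // here before the one-shot dialect conversion below runs.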
    {
      RewritePatternSet patterns(ctx);
      populateGpuRewritePatterns(patterns);
      (void)applyPatternsAndFoldGreedily(m, std::move(patterns));
    }

    LLVMTypeConverter converter(ctx, options);
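    // Map the gpu dialect memory space enum onto the AMDGPU address-space
    // numbering: 1 = global, 3 = workgroup (LDS), 5 = private (scratch).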
    populateGpuMemorySpaceAttributeConversions(
        converter, [](gpu::AddressSpace space) {
          switch (space) {
          case gpu::AddressSpace::Global:
            return 1;
          case gpu::AddressSpace::Workgroup:
            return 3;
          case gpu::AddressSpace::Private:
            return 5;
          }
          llvm_unreachable("unknown address space enum value");
          return 0;
        });

    RewritePatternSet llvmPatterns(ctx);

    mlir::arith::populateArithToLLVMConversionPatterns(converter, llvmPatterns);
    populateAMDGPUToROCDLConversionPatterns(converter, llvmPatterns,
                                            *maybeChipset);
    populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
    cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns);
    populateFuncToLLVMConversionPatterns(converter, llvmPatterns);
    populateFinalizeMemRefToLLVMConversionPatterns(converter, llvmPatterns);
    populateGpuToROCDLConversionPatterns(converter, llvmPatterns, runtime);
    LLVMConversionTarget target(getContext());
    configureGpuToROCDLConversionLegality(target);
    if (failed(applyPartialConversion(m, target, std::move(llvmPatterns))))
      signalPassFailure();

    // Manually rewrite known block size attributes so the LLVMIR translation
    // infrastructure can pick them up.
    m.walk([ctx](LLVM::LLVMFuncOp op) {
      if (auto blockSizes = dyn_cast_or_null<DenseI32ArrayAttr>(
              op->removeAttr(gpu::GPUFuncOp::getKnownBlockSizeAttrName()))) {
        op->setAttr(ROCDL::ROCDLDialect::getReqdWorkGroupSizeAttrName(),
                    blockSizes);
        // Also set up the rocdl.flat_work_group_size attribute to prevent
        // conflicting metadata.
        uint32_t flatSize = 1;
        for (uint32_t size : blockSizes.asArrayRef()) {
          flatSize *= size;
        }
        StringAttr flatSizeAttr =
            StringAttr::get(ctx, Twine(flatSize) + "," + Twine(flatSize));
        op->setAttr(ROCDL::ROCDLDialect::getFlatWorkGroupSizeAttrName(),
                    flatSizeAttr);
      }
    });
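
    // Sketch of the effect: a function with known block sizes [64, 2, 1]
    // ends up carrying rocdl.reqd_work_group_size = [64, 2, 1] and
    // rocdl.flat_work_group_size = "128,128".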
  }
};

} // namespace

void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
  target.addIllegalOp<func::FuncOp>();
  target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
  target.addLegalDialect<ROCDL::ROCDLDialect>();
  target.addIllegalDialect<gpu::GPUDialect>();
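  // The plain LLVM math intrinsics are marked illegal here: on the ROCm stack
  // this functionality comes from the __ocml_* device-library calls produced
  // by populateGpuToROCDLConversionPatterns, and leaving the intrinsics in
  // the IR could fail later in the backend.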
  target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op,
                      LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp, LLVM::SqrtOp>();

  // TODO: Remove once we support replacing non-root ops.
  target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}

template <typename OpTy>
static void populateOpPatterns(LLVMTypeConverter &converter,
                               RewritePatternSet &patterns, StringRef f32Func,
                               StringRef f64Func) {
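  // Scalarize any vector operands first, then lower the scalar op to a call
  // to the matching f32/f64 device-library function.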
  patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter);
  patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func);
}

void mlir::populateGpuToROCDLConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    mlir::gpu::amd::Runtime runtime) {
  using mlir::gpu::amd::Runtime;

  populateWithGenerated(patterns);
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
                                       ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>>(
          converter, gpu::GPUFuncOp::getKnownBlockSizeAttrName());
  patterns.add<GPUIndexIntrinsicOpLowering<
      gpu::BlockIdOp, ROCDL::BlockIdXOp, ROCDL::BlockIdYOp, ROCDL::BlockIdZOp>>(
      converter, gpu::GPUFuncOp::getKnownGridSizeAttrName());
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp,
                                       ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>,
           GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp,
                                       ROCDL::GridDimYOp, ROCDL::GridDimZOp>,
           GPUReturnOpLowering>(converter);
  patterns.add<GPUFuncOpLowering>(
      converter,
      /*allocaAddrSpace=*/ROCDL::ROCDLDialect::kPrivateMemoryAddressSpace,
      /*workgroupAddrSpace=*/ROCDL::ROCDLDialect::kSharedMemoryAddressSpace,
      StringAttr::get(&converter.getContext(),
                      ROCDL::ROCDLDialect::getKernelFuncAttrName()));
  if (Runtime::HIP == runtime) {
    patterns.add<GPUPrintfOpToHIPLowering>(converter);
  } else if (Runtime::OpenCL == runtime) {
    // Use address space = 4 to match the OpenCL definition of printf()
    patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/4);
  }

  patterns.add<GPULaneIdOpToROCDL>(converter);

  populateOpPatterns<math::AbsFOp>(converter, patterns, "__ocml_fabs_f32",
                                   "__ocml_fabs_f64");
  populateOpPatterns<math::AtanOp>(converter, patterns, "__ocml_atan_f32",
                                   "__ocml_atan_f64");
  populateOpPatterns<math::Atan2Op>(converter, patterns, "__ocml_atan2_f32",
                                    "__ocml_atan2_f64");
  populateOpPatterns<math::CbrtOp>(converter, patterns, "__ocml_cbrt_f32",
                                   "__ocml_cbrt_f64");
  populateOpPatterns<math::CeilOp>(converter, patterns, "__ocml_ceil_f32",
                                   "__ocml_ceil_f64");
  populateOpPatterns<math::CosOp>(converter, patterns, "__ocml_cos_f32",
                                  "__ocml_cos_f64");
  populateOpPatterns<math::ExpOp>(converter, patterns, "__ocml_exp_f32",
                                  "__ocml_exp_f64");
  populateOpPatterns<math::Exp2Op>(converter, patterns, "__ocml_exp2_f32",
                                   "__ocml_exp2_f64");
  populateOpPatterns<math::ExpM1Op>(converter, patterns, "__ocml_expm1_f32",
                                    "__ocml_expm1_f64");
  populateOpPatterns<math::FloorOp>(converter, patterns, "__ocml_floor_f32",
                                    "__ocml_floor_f64");
  populateOpPatterns<math::LogOp>(converter, patterns, "__ocml_log_f32",
                                  "__ocml_log_f64");
  populateOpPatterns<math::Log10Op>(converter, patterns, "__ocml_log10_f32",
                                    "__ocml_log10_f64");
  populateOpPatterns<math::Log1pOp>(converter, patterns, "__ocml_log1p_f32",
                                    "__ocml_log1p_f64");
  populateOpPatterns<math::Log2Op>(converter, patterns, "__ocml_log2_f32",
                                   "__ocml_log2_f64");
  populateOpPatterns<math::PowFOp>(converter, patterns, "__ocml_pow_f32",
                                   "__ocml_pow_f64");
  populateOpPatterns<math::RsqrtOp>(converter, patterns, "__ocml_rsqrt_f32",
                                    "__ocml_rsqrt_f64");
  populateOpPatterns<math::SinOp>(converter, patterns, "__ocml_sin_f32",
                                  "__ocml_sin_f64");
  populateOpPatterns<math::SqrtOp>(converter, patterns, "__ocml_sqrt_f32",
                                   "__ocml_sqrt_f64");
  populateOpPatterns<math::TanhOp>(converter, patterns, "__ocml_tanh_f32",
                                   "__ocml_tanh_f64");
  populateOpPatterns<math::TanOp>(converter, patterns, "__ocml_tan_f32",
                                  "__ocml_tan_f64");
  populateOpPatterns<math::ErfOp>(converter, patterns, "__ocml_erf_f32",
                                  "__ocml_erf_f64");
}

std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToROCDLOpsPass(const std::string &chipset,
                                      unsigned indexBitwidth,
                                      bool useBarePtrCallConv,
                                      gpu::amd::Runtime runtime) {
  return std::make_unique<LowerGpuOpsToROCDLOpsPass>(
      chipset, indexBitwidth, useBarePtrCallConv, runtime);
}
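
// Illustrative usage (a sketch, not part of the upstream file): running the
// pass on every gpu.module nested under `module`; the option values shown are
// assumptions.
//
//   PassManager pm(ctx);
//   pm.addNestedPass<gpu::GPUModuleOp>(createLowerGpuOpsToROCDLOpsPass(
//       /*chipset=*/"gfx900", /*indexBitwidth=*/32,
//       /*useBarePtrCallConv=*/false, gpu::amd::Runtime::HIP));
//   if (failed(pm.run(module)))
//     /* handle the failure */;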