//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value v = builder.create<OpTy>(loc, builder.getIndexType(),
                                   builder.getStringAttr(dim));
    values.push_back(v);
  }
}

/// Adds operations generating block/thread ids and grid/block dimensions at the
/// beginning of the `launchFuncOpBody` region. Also adds a mapping from each
/// argument of the entry block of `launchOpBody` to the corresponding result of
/// the added operations.
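///
/// For example (schematic IR; the SSA names are illustrative), the first
/// entry-block argument of `launchOpBody`, which holds the x block id, gets
/// mapped to a value created as
///
///   %0 = "gpu.block_id"() {dimension = "x"} : () -> index
///
/// and similarly for the remaining eleven thread/block index arguments.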
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 entry-block arguments to the respective block/thread
  // index operations just created, in creation order.
  for (auto indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating) them is not legal.
static bool isSinkingBeneficiary(Operation *op) {
  return isa<arith::ConstantOp, ConstantOp, memref::DimOp, SelectOp,
             arith::CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
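///
/// For example (schematic IR), when considering
///
///   %c0  = arith.constant 0 : index
///   %dim = memref.dim %mem, %c0 : memref<?xf32>
///
/// for sinking, the recursion first checks %c0 and, since both ops are
/// beneficiaries, records them in `beneficiaryOps` in def-before-use order
/// (%c0, then %dim); %mem stays behind as an existing dependency.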
static bool
extractBeneficiaryOps(Operation *op,
                      const SetVector<Value> &existingDependencies,
                      SetVector<Operation *> &beneficiaryOps,
                      llvm::SmallPtrSetImpl<Value> &availableValues) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp ||
         !extractBeneficiaryOps(definingOp, existingDependencies,
                                beneficiaryOps, availableValues)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

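/// Sinks beneficial operations from above into the body of `launchOp`. For
/// example (schematic IR; names are illustrative), given
///
///   %c0  = arith.constant 0 : index
///   %dim = memref.dim %mem, %c0 : memref<?xf32>
///   gpu.launch blocks(%bx, %by, %bz) in (...) threads(%tx, %ty, %tz) in (...) {
///     // ... uses %dim ...
///   }
///
/// clones of %c0 and %dim are created at the top of the launch body and all
/// uses inside the region are redirected to the clones, so that only %mem
/// remains captured from the surrounding function.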
LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
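///
/// For example (schematic IR), a launch body capturing one memref may be
/// outlined into roughly:
///
///   gpu.func @foo_kernel(%arg0: memref<?xf32>) kernel {
///     %0 = "gpu.block_id"() {dimension = "x"} : () -> index
///     // ... remaining index operations and the cloned launch body ...
///     gpu.return
///   }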
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point, insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (auto operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from entry of the gpu.func operation to the block that is cloned
  // from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace `gpu.launch` operations with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
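///
/// For example (schematic IR; names are illustrative), the resulting launch
/// looks roughly like:
///
///   gpu.launch_func @foo_kernel::@foo_kernel
///       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz)
///       args(%arg0 : memref<?xf32>)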
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}

namespace {
/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested gpu.module and rewrites the original gpu.launch into a
/// gpu.launch_func that targets the outlined kernel.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The launch_func operations can then be lowered to calls
/// into the compiled blob.
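///
/// For example (schematic IR), after this pass a module looks roughly like:
///
///   module attributes {gpu.container_module} {
///     func @foo(%mem : memref<?xf32>) {
///       ...
///       gpu.launch_func @foo_kernel::@foo_kernel ...
///     }
///     gpu.module @foo_kernel {
///       gpu.func @foo_kernel(%arg0 : memref<?xf32>) kernel {
///         ...
///       }
///     }
///   }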
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  GpuKernelOutliningPass(StringRef dlStr) {
    if (!dlStr.empty() && !dataLayoutStr.hasValue())
      dataLayoutStr = dlStr.str();
  }

  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
      : dataLayoutSpec(other.dataLayoutSpec) {
    dataLayoutStr = other.dataLayoutStr;
  }

  LogicalResult initialize(MLIRContext *context) override {
    // Initialize the data layout specification from the data layout string.
    if (!dataLayoutStr.empty()) {
      Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
      if (!resultAttr)
        return failure();

      dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
      if (!dataLayoutSpec)
        return failure();
    }

    return success();
  }

  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());

    // If a valid data layout spec was provided, attach it to the kernel module
    // under the DLTI data layout attribute name. Otherwise, the default data
    // layout will be used.
    if (dataLayoutSpec)
      kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);

    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }

  Option<std::string> dataLayoutStr{
      *this, "data-layout-str",
      llvm::cl::desc("String containing the data layout specification to be "
                     "attached to the GPU kernel module")};
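
  // The option is typically set from the command line; for example
  // (hypothetical invocation):
  //   mlir-opt --gpu-kernel-outlining='data-layout-str=...' input.mlir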

  DataLayoutSpecInterface dataLayoutSpec;
};

} // namespace

std::unique_ptr<OperationPass<ModuleOp>>
mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
}