//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

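// Create one operation of type `OpTy` per dimension ("x", "y", "z") at `loc`
// and append the results, in that order, to `values`.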
template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value v = builder.create<OpTy>(loc, builder.getIndexType(),
                                   builder.getStringAttr(dim));
    values.push_back(v);
  }
}

// Add operations generating block/thread ids and grid/block dimensions at the
// beginning of the `launchFuncOpBody` region. Add a mapping from the arguments
// of the entry block of `launchOpBody` to the corresponding result values of
// the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 arguments of the launch body's entry block to the
  // results of the index operations just created.
  for (auto indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

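// Returns true if the given operation is cheap to recompute inside the kernel
// body; sinking such operations into the launch region avoids passing their
// results in as kernel arguments. Currently limited to constant and dim
// operations from the standard dialect.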
static bool isSinkingBeneficiary(Operation *op) {
  return isa<ConstantOp>(op) || isa<DimOp>(op);
}

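// Sink cheap operations that are defined above the `gpu.launch` but used
// inside it into the launch body, so that they need not become kernel
// arguments later. A rough sketch (illustrative; names are made up):
//
//   %c0 = constant 0 : index
//   gpu.launch ... {
//     ... use of %c0 ...
//   }
//
// becomes
//
//   gpu.launch ... {
//     %c0_cloned = constant 0 : index
//     ... use of %c0_cloned ...
//   }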
LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  llvm::SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  llvm::SetVector<Value> sunkValues;
  llvm::SetVector<Operation *> sunkOperations;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp || !isSinkingBeneficiary(operandOp))
      continue;
    // Only sink operations that do not create new sinkCandidates.
    if (!llvm::all_of(operandOp->getOperands(), [&sinkCandidates](Value value) {
          return sinkCandidates.count(value);
        }))
      continue;
    sunkValues.insert(operand);
    sunkOperations.insert(operandOp);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  DenseSet<Operation *> processed;
  while (processed.size() != sunkOperations.size()) {
    auto startSize = processed.size();
    for (Operation *sunkOperation : sunkOperations) {
      if (processed.count(sunkOperation))
        continue;

      // The operation can't be cloned yet if any of its operands is also being
      // sunk but hasn't been cloned yet.
      if (llvm::any_of(
              sunkOperation->getOperands(), [&sunkValues, &map](Value value) {
                return sunkValues.count(value) && !map.lookupOrNull(value);
              }))
        continue;

      Operation *clonedOp = builder.clone(*sunkOperation, map);
      // Only replace uses within the launch op.
      for (auto result : llvm::enumerate(sunkOperation->getResults())) {
        auto replacement = clonedOp->getResult(result.index());
        for (auto &use : llvm::make_early_inc_range(result.value().getUses()))
          if (use.getOwner()->getParentOfType<gpu::LaunchOp>() == launchOp)
            use.set(replacement);
      }
      processed.insert(sunkOperation);
    }
    if (startSize == processed.size())
      return launchOp.emitError(
          "found illegal cyclic dependency between operations while sinking");
  }
  return success();
}

// Outline the `gpu.launch` operation body into a kernel function. Replace
// `gpu.terminator` operations with `gpu.return` in the generated function.
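//
// A rough sketch of the generated function (illustrative; names are made up):
//
//   gpu.func @foo_kernel(%arg0: f32, ...) attributes {gpu.kernel} {
//     %0 = "gpu.block_id"() {dimension = "x"} : () -> index
//     ...                 // remaining index operations
//     br ^bb1             // branch into the cloned launch body
//   ^bb1:
//     ...
//     gpu.return
//   }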
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            llvm::SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point; insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(kernelOperandTypes, {}, launchOp.getContext());
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc.setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                       builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (auto operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO(ravishankarm): If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from the entry block of the gpu.func operation to the block cloned
  // from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

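// Outline the kernel and append any newly captured values, i.e. operands the
// outlined function requires beyond those the caller already supplied, to
// `operands` so the caller can forward them to the launch.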
gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  llvm::SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

// Replace a `gpu.launch` operation with a `gpu.launch_func` operation that
// launches `kernelFunc`. The kernel function contains the body of the
// `gpu.launch` with constant region arguments inlined.
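//
// Illustrative generic form of the replacement (attributes and types elided;
// operand order follows the build call below):
//
//   "gpu.launch_func"(%gridX, %gridY, %gridZ, %blockX, %blockY, %blockZ,
//                     %kernelOperand0, ...) {...} : (...) -> ()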
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), operands);
  launchOp.erase();
}

namespace {
/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a gpu.func created
/// inside a nested gpu.module. It also rewrites the LaunchOp into a
/// LaunchFuncOp that references the outlined kernel by symbol.
///
/// The gpu.modules are intended to be compiled to a binary blob (e.g. a cubin)
/// independently in a separate pass.
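///
/// A rough sketch of the resulting structure (illustrative; names made up):
///
///   module attributes {gpu.container_module} {
///     func @foo(...) {
///       "gpu.launch_func"(...) {...} : (...) -> ()
///     }
///     gpu.module @foo_kernel {
///       gpu.func @foo_kernel(...) attributes {gpu.kernel} { ... }
///     }
///   }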
class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {
public:
/// Include the generated pass utilities.
#define GEN_PASS_GpuKernelOutlining
#include "mlir/Dialect/GPU/Passes.h.inc"

  void runOnModule() override {
    SymbolTable symbolTable(getModule());
    bool modified = false;
    for (auto func : getModule().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func.getOperation()->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        llvm::SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op.getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk into the launch body.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getModule().setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                          UnitAttr::get(&getContext()));
  }

private:
  // Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto context = getModule().getContext();
    Builder builder(context);
    OperationState state(kernelFunc.getLoc(),
                         gpu::GPUModuleOp::getOperationName());
    gpu::GPUModuleOp::build(&builder, state, kernelFunc.getName());
    auto kernelModule = cast<gpu::GPUModuleOp>(Operation::create(state));
    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

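    // Walk the symbol uses transitively, cloning every referenced symbol
    // definition into the new module so that it is self-contained.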
    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }
};

} // namespace

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createGpuKernelOutliningPass() {
  return std::make_unique<GpuKernelOutliningPass>();
}