//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value v = builder.create<OpTy>(loc, builder.getIndexType(),
                                   builder.getStringAttr(dim));
    values.push_back(v);
  }
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region. Adds a mapping from each
/// argument of the entry block of `launchOpBody` to the corresponding result
/// of the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the 12 leading block arguments of the launch body (block/thread ids
  // and grid/block dimensions) to the results of the index operations created
  // above.
  for (auto indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isSinkingBeneficiary(Operation *op) {
  return isa<ConstantOp, memref::DimOp, SelectOp, CmpIOp>(op);
}
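// Informal rationale: every op accepted above is side-effect free and cheap
// to rematerialize, so cloning it into the kernel trades a kernel argument
// (and the corresponding host-to-device value passing) for a recomputation
// inside the kernel.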
/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by querying
/// `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
static bool
extractBeneficiaryOps(Operation *op,
                      const SetVector<Value> &existingDependencies,
                      SetVector<Operation *> &beneficiaryOps,
                      llvm::SmallPtrSetImpl<Value> &availableValues) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp ||
         !extractBeneficiaryOps(definingOp, existingDependencies,
                                beneficiaryOps, availableValues)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}
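// Example of the sinking rewrite (hypothetical IR, syntax abbreviated). Given
//
//   %c0  = constant 0 : index
//   %cnd = cmpi sgt, %n, %c0 : index
//   %sel = select %cnd, %n, %c0 : index
//   gpu.launch ... {
//     ... uses of %n and %sel ...
//   }
//
// the select (and, recursively, the cmpi and constant it depends on) is
// cloned into the launch body and the in-region uses of %sel are redirected
// to the clone, leaving %n as the only value that still needs to become a
// kernel argument during outlining.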
/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point, insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands)
    kernelOperandTypes.push_back(operand.getType());
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from the gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (auto operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for a block
  // exists, that block will be used to clone operations into (at the end of
  // the block), instead of creating a new block, this would be much cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from the entry block of the gpu.func operation to the block that is
  // cloned from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace a `gpu.launch` operation with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size; if it is absent,
  // the kernel is launched with zero dynamic shared memory.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}
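// End-to-end shape of the pass implemented below (illustrative; operands and
// types elided):
//
//   func @foo(...) {
//     gpu.launch blocks(...) in (...) threads(...) in (...) { ... }
//   }
//
// becomes
//
//   module attributes {gpu.container_module} {
//     func @foo(...) {
//       gpu.launch_func @foo_kernel::@foo_kernel
//           blocks in (...) threads in (...) args(...)
//     }
//     gpu.module @foo_kernel {
//       gpu.func @foo_kernel(...) kernel { ... }
//     }
//   }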
namespace {
/// Pass that moves the kernel of each LaunchOp into its separate nested
/// module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently
/// in a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk into the launch body.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create the nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes the signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());
    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }
};

} // namespace

std::unique_ptr<OperationPass<ModuleOp>> mlir::createGpuKernelOutliningPass() {
  return std::make_unique<GpuKernelOutliningPass>();
}
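// NOTE: The pass is registered in Passes.td under the command-line name
// "gpu-kernel-outlining", so the transformation above can be exercised in
// isolation with, e.g., `mlir-opt --gpu-kernel-outlining`.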