//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (auto dim : {gpu::Dimension::x, gpu::Dimension::y, gpu::Dimension::z})
    values.push_back(builder.create<OpTy>(loc, builder.getIndexType(), dim));
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region. Adds mappings from the
/// arguments of the entry block of `launchOpBody` to the corresponding result
/// values of the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 block arguments of the launch region to the results of
  // the respective thread/block index operations created above.
  for (const auto &indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isSinkingBeneficiary(Operation *op) {
  return isa<arith::ConstantOp, ConstantOp, memref::DimOp, arith::SelectOp,
             arith::CmpIOp>(op);
}
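// Illustrative example (hypothetical IR, not taken from this pass's tests):
// given a value defined above a launch and used inside it,
//
//   %c0  = arith.constant 0 : index
//   %dim = memref.dim %mem, %c0 : memref<?xf32>
//   gpu.launch ... {
//     ... uses of %dim ...
//     gpu.terminator
//   }
//
// both %c0 and %dim are sinking beneficiaries: they are side-effect free, so
// cloning them into the launch region avoids passing %dim as a kernel
// argument when the region is later outlined.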
/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by querying
/// `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
static bool
extractBeneficiaryOps(Operation *op,
                      const SetVector<Value> &existingDependencies,
                      SetVector<Operation *> &beneficiaryOps,
                      llvm::SmallPtrSetImpl<Value> &availableValues) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp ||
         !extractBeneficiaryOps(definingOp, existingDependencies,
                                beneficiaryOps, availableValues)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}
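// Illustrative example (hypothetical IR): the recursion above sinks whole
// chains of defining ops. Assuming both %n and %sel are used inside the
// launch region,
//
//   %c4  = arith.constant 4 : index
//   %cmp = arith.cmpi slt, %n, %c4 : index
//   %sel = arith.select %cmp, %n, %c4 : index
//
// sinking %sel first recurses into %cmp and %c4; %n needs no sinking because
// it is already an existing dependency of the launch. `beneficiaryOps` then
// holds %c4, %cmp, %sel in def-before-use order, ready to be cloned.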
/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point, insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from the gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (const auto &operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for a block
  // exists, that block will be used to clone operations into (at the end of
  // the block), instead of creating a new block, this would be much cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from the entry block of the gpu.func operation to the block that
  // is cloned from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace a `gpu.launch` operation with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}
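// Illustrative end-to-end sketch (hypothetical IR, operand details elided):
// after outlining, a function @foo containing a gpu.launch is rewritten
// roughly from
//
//   func @foo(...) {
//     gpu.launch blocks(...) in (...) threads(...) in (...) {
//       ...
//       gpu.terminator
//     }
//   }
//
// into a launch of the outlined kernel that lives in a nested module:
//
//   func @foo(...) {
//     gpu.launch_func @foo_kernel::@foo_kernel
//         blocks in (...) threads in (...) args(...)
//   }
//   gpu.module @foo_kernel {
//     gpu.func @foo_kernel(...) kernel { ... gpu.return }
//   }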
namespace {
/// Pass that moves the kernel of each LaunchOp into its separate nested
/// module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently
/// in a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  GpuKernelOutliningPass(StringRef dlStr) {
    if (!dlStr.empty() && !dataLayoutStr.hasValue())
      dataLayoutStr = dlStr.str();
  }

  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
      : dataLayoutSpec(other.dataLayoutSpec) {
    dataLayoutStr = other.dataLayoutStr;
  }

  LogicalResult initialize(MLIRContext *context) override {
    // Initialize the data layout specification from the data layout string.
    if (!dataLayoutStr.empty()) {
      Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
      if (!resultAttr)
        return failure();

      dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
      if (!dataLayoutSpec)
        return failure();
    }

    return success();
  }

  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk into the launch region.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create the nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes the signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted into this module, annotate this module
    // as a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
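  // Illustrative example (hypothetical IR): if the outlined kernel calls a
  // device-side function,
  //
  //   gpu.func @foo_kernel(...) kernel {
  //     call @helper(%0) : (f32) -> f32
  //     gpu.return
  //   }
  //
  // the helper below also clones @helper (and, transitively, any symbols that
  // @helper itself references) into the new gpu.module, so that the module
  // can be compiled independently of its parent.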
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());

    // If a valid data layout spec was provided, attach it to the kernel
    // module. Otherwise, the default data layout will be used.
    if (dataLayoutSpec)
      kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);

    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }

  Option<std::string> dataLayoutStr{
      *this, "data-layout-str",
      llvm::cl::desc("String containing the data layout specification to be "
                     "attached to the GPU kernel module")};

  DataLayoutSpecInterface dataLayoutSpec;
};

} // namespace

std::unique_ptr<OperationPass<ModuleOp>>
mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
}
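// Illustrative usage sketch (hypothetical driver code): the pass is typically
// scheduled on a ModuleOp from C++ as
//
//   mlir::PassManager pm(&context);
//   pm.addPass(mlir::createGpuKernelOutliningPass());
//   if (failed(pm.run(module)))
//     /* handle the error */;
//
// or, assuming the standard pass registration, from the command line via
// `mlir-opt --gpu-kernel-outlining`.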