//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

// Create one op of type `OpTy` per dimension ("x", "y", "z") at the builder's
// current insertion point and append the three results to `values`. Each op
// produces an index-typed value and carries the dimension as a string
// attribute.
template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value *> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value *v = builder.create<OpTy>(loc, builder.getIndexType(),
                                    builder.getStringAttr(dim));
    values.push_back(v);
  }
}

// Add operations generating block/thread ids and grid/block dimensions at the
// beginning of `kernelFunc` and replace uses of the respective function args.
43 static void injectGpuIndexOperations(Location loc, FuncOp kernelFunc) { 44 OpBuilder OpBuilder(kernelFunc.getBody()); 45 SmallVector<Value *, 12> indexOps; 46 createForAllDimensions<gpu::BlockIdOp>(OpBuilder, loc, indexOps); 47 createForAllDimensions<gpu::ThreadIdOp>(OpBuilder, loc, indexOps); 48 createForAllDimensions<gpu::GridDimOp>(OpBuilder, loc, indexOps); 49 createForAllDimensions<gpu::BlockDimOp>(OpBuilder, loc, indexOps); 50 // Replace the leading 12 function args with the respective thread/block index 51 // operations. Iterate backwards since args are erased and indices change. 52 for (int i = 11; i >= 0; --i) { 53 auto &firstBlock = kernelFunc.front(); 54 firstBlock.getArgument(i)->replaceAllUsesWith(indexOps[i]); 55 firstBlock.eraseArgument(i); 56 } 57 } 58 59 static bool isInliningBeneficiary(Operation *op) { 60 return isa<ConstantOp>(op) || isa<DimOp>(op); 61 } 62 63 // Move arguments of the given kernel function into the function if this reduces 64 // the number of kernel arguments. 65 static gpu::LaunchFuncOp inlineBeneficiaryOps(FuncOp kernelFunc, 66 gpu::LaunchFuncOp launch) { 67 OpBuilder kernelBuilder(kernelFunc.getBody()); 68 auto &firstBlock = kernelFunc.getBody().front(); 69 llvm::SmallVector<Value *, 8> newLaunchArgs; 70 BlockAndValueMapping map; 71 for (int i = 0, e = launch.getNumKernelOperands(); i < e; ++i) { 72 map.map(launch.getKernelOperand(i), kernelFunc.getArgument(i)); 73 } 74 for (int i = launch.getNumKernelOperands() - 1; i >= 0; --i) { 75 auto operandOp = launch.getKernelOperand(i)->getDefiningOp(); 76 if (!operandOp || !isInliningBeneficiary(operandOp)) { 77 newLaunchArgs.push_back(launch.getKernelOperand(i)); 78 continue; 79 } 80 // Only inline operations that do not create new arguments. 
81 if (!llvm::all_of(operandOp->getOperands(), 82 [map](Value *value) { return map.contains(value); })) { 83 continue; 84 } 85 auto clone = kernelBuilder.clone(*operandOp, map); 86 firstBlock.getArgument(i)->replaceAllUsesWith(clone->getResult(0)); 87 firstBlock.eraseArgument(i); 88 } 89 if (newLaunchArgs.size() == launch.getNumKernelOperands()) 90 return launch; 91 92 std::reverse(newLaunchArgs.begin(), newLaunchArgs.end()); 93 OpBuilder LaunchBuilder(launch); 94 SmallVector<Type, 8> newArgumentTypes; 95 newArgumentTypes.reserve(firstBlock.getNumArguments()); 96 for (auto value : firstBlock.getArguments()) { 97 newArgumentTypes.push_back(value->getType()); 98 } 99 kernelFunc.setType(LaunchBuilder.getFunctionType(newArgumentTypes, {})); 100 auto newLaunch = LaunchBuilder.create<gpu::LaunchFuncOp>( 101 launch.getLoc(), kernelFunc, launch.getGridSizeOperandValues(), 102 launch.getBlockSizeOperandValues(), newLaunchArgs); 103 launch.erase(); 104 return newLaunch; 105 } 106 107 // Outline the `gpu.launch` operation body into a kernel function. Replace 108 // `gpu.return` operations by `std.return` in the generated function. 
// Outlines the body of `launchOp` into a new function. The launch's region is
// moved (not copied) into the function, the leading block/thread-index
// arguments are replaced by GPU index ops, and `gpu.return` terminators are
// rewritten to `std.return`.
static FuncOp outlineKernelFunc(gpu::LaunchOp launchOp) {
  Location loc = launchOp.getLoc();
  SmallVector<Type, 4> kernelOperandTypes(launchOp.getKernelOperandTypes());
  FunctionType type =
      FunctionType::get(kernelOperandTypes, {}, launchOp.getContext());
  // Derive the kernel name from the enclosing function; it may be uniqued
  // later when inserted into a module via the ModuleManager.
  std::string kernelFuncName =
      Twine(launchOp.getParentOfType<FuncOp>().getName(), "_kernel").str();
  FuncOp outlinedFunc = FuncOp::create(loc, kernelFuncName, type);
  // Steal the launch region wholesale; `launchOp` is left with an empty body
  // and is erased by the caller (convertToLaunchFuncOp).
  outlinedFunc.getBody().takeBody(launchOp.getBody());
  Builder builder(launchOp.getContext());
  outlinedFunc.setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                       builder.getUnitAttr());
  injectGpuIndexOperations(loc, outlinedFunc);
  // `gpu.return` is only valid inside a launch region; in a FuncOp the
  // terminator must be `std.return`.
  outlinedFunc.walk([](gpu::ReturnOp op) {
    OpBuilder replacer(op);
    replacer.create<ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

// Replace `gpu.launch` operations with an `gpu.launch_func` operation launching
// `kernelFunc`. The kernel func contains the body of the `gpu.launch` with
// constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp &launchOp, FuncOp kernelFunc) {
  OpBuilder builder(launchOp);
  SmallVector<Value *, 4> kernelOperandValues(
      launchOp.getKernelOperandValues());
  auto launchFuncOp = builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), kernelOperandValues);
  // May rewrite the launch_func (and the kernel signature) to pull cheap
  // defining ops (constants, dims) into the kernel body.
  inlineBeneficiaryOps(kernelFunc, launchFuncOp);
  launchOp.erase();
}

namespace {

/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The kernel modules are intended to be compiled to a cubin blob independently
/// in a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
class GpuKernelOutliningPass : public ModulePass<GpuKernelOutliningPass> {
public:
  void runOnModule() override {
    ModuleManager moduleManager(getModule());
    bool modified = false;
    for (auto func : getModule().getOps<FuncOp>()) {
      // Insert just after the function. Captured before the walk so nested
      // kernel modules end up next to the function they were outlined from.
      Block::iterator insertPt(func.getOperation()->getNextNode());
      func.walk([&](gpu::LaunchOp op) {
        FuncOp outlinedFunc = outlineKernelFunc(op);

        // Create nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, moduleManager);
        moduleManager.insert(insertPt, kernelModule);

        // Potentially changes signature, pulling in constants.
        // Note: this erases `op`; the walk visits the op last, so erasing it
        // inside the callback is safe.
        convertToLaunchFuncOp(op, outlinedFunc);
        modified = true;
      });
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getModule().setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                          UnitAttr::get(&getContext()));
  }

private:
  // Returns a module containing kernelFunc and all callees (recursive).
  // Symbols referenced by the kernel (transitively) are cloned from the parent
  // module into the new kernel module so it is self-contained.
  ModuleOp createKernelModule(FuncOp kernelFunc,
                              const ModuleManager &parentModuleManager) {
    auto context = getModule().getContext();
    Builder builder(context);
    auto kernelModule =
        ModuleOp::create(builder.getUnknownLoc(), kernelFunc.getName());
    kernelModule.setAttr(gpu::GPUDialect::getKernelModuleAttrName(),
                         builder.getUnitAttr());
    ModuleManager moduleManager(kernelModule);

    moduleManager.insert(kernelFunc);

    // Worklist of definitions whose symbol uses still need to be resolved;
    // each clone may itself reference further symbols.
    llvm::SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          // Already cloned into the kernel module; skip to avoid duplicates.
          if (moduleManager.lookupSymbol(symbolName))
            continue;

          Operation *symbolDefClone =
              parentModuleManager.lookupSymbol(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          moduleManager.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }
};

} // namespace

std::unique_ptr<OpPassBase<ModuleOp>> mlir::createGpuKernelOutliningPass() {
  return std::make_unique<GpuKernelOutliningPass>();
}

static PassRegistration<GpuKernelOutliningPass>
    pass("gpu-kernel-outlining",
         "Outline gpu.launch bodies to kernel functions.");