//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface between the inliner and a learned model.
// It delegates model evaluation to either the AOT compiled model (the
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_AOT) || defined(LLVM_HAVE_TF_API)

#include <limits>
#include <unordered_map>
#include <unordered_set>

#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Path.h"

using namespace llvm;

#ifdef LLVM_HAVE_TF_AOT
#include "llvm/Analysis/ReleaseModeModelRunner.h"
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT
#include "llvm/Analysis/InlineModelFeatureMaps.h"

std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
  auto AOTRunner =
      std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
          M.getContext(), FeatureNameMap, DecisionName);
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
}
#endif

#define DEBUG_TYPE "inline-ml"

static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));

// clang-format off
const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
// InlineCost features - these must come first
#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES

// Non-cost features
#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
// clang-format on

const char *const llvm::DecisionName = "inlining_decision";
const char *const llvm::DefaultDecisionName = "inlining_default";
const char *const llvm::RewardName = "delta_size";

// Return the call site if it directly calls a function we have a definition
// for; otherwise, return nullptr.
static CallBase *getInlinableCS(Instruction &I) {
  if (auto *CS = dyn_cast<CallBase>(&I))
    if (Function *Callee = CS->getCalledFunction())
      if (!Callee->isDeclaration())
        return CS;
  return nullptr;
}

MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                                 std::unique_ptr<MLModelRunner> Runner)
    : InlineAdvisor(
          M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
      ModelRunner(std::move(Runner)), CG(new CallGraph(M)),
      InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
  assert(ModelRunner);

  // Extract the 'call site height' feature - the position of a call site
  // relative to the farthest statically reachable SCC node. We don't update
  // this value as inlining happens. Empirically, this feature proved critical
  // in behavioral cloning - i.e. training a model to mimic the manual
  // heuristic's decisions - and, thus, equally important for training for
  // improvement.
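  // Illustrative example (hypothetical call graph, not from the source): for
  // an acyclic graph main -> f -> g, the bottom-up SCC traversal below visits
  // g, then f, then main. g gets level 0, f gets level 1, and main gets level
  // 2, so a call site inside main has a 'call site height' of 2.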
  for (auto I = scc_begin(CG.get()); !I.isAtEnd(); ++I) {
    const std::vector<CallGraphNode *> &CGNodes = *I;
    unsigned Level = 0;
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (!F || F->isDeclaration())
        continue;
      for (auto &I : instructions(F)) {
        if (auto *CS = getInlinableCS(I)) {
          auto *Called = CS->getCalledFunction();
          auto Pos = FunctionLevels.find(Called);
          // In a bottom-up traversal, an inlinable callee is either in the
          // same SCC, or in an SCC visited earlier. So not finding its level
          // means we haven't visited it yet, meaning it's in this SCC.
          if (Pos == FunctionLevels.end())
            continue;
          Level = std::max(Level, Pos->second + 1);
        }
      }
    }
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (F && !F->isDeclaration())
        FunctionLevels[F] = Level;
    }
  }
}

void MLInlineAdvisor::onPassEntry() {
  // Function passes executed between InlinerPass runs may have changed the
  // module-wide features.
  if (!Invalid)
    return;
  NodeCount = 0;
  EdgeCount = 0;
  for (auto &F : M)
    if (!F.isDeclaration()) {
      ++NodeCount;
      EdgeCount += getLocalCalls(F);
    }
  Invalid = false;
}

int64_t MLInlineAdvisor::getLocalCalls(Function &F) {
  return FAM.getResult<FunctionPropertiesAnalysis>(F)
      .DirectCallsToDefinedFunctions;
}

// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state -
// the number of functions and the number of static calls. We also keep track
// of the total IR size in this module, to stop misbehaving policies at a
// certain bloat factor (SizeIncreaseThreshold).
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {
  assert(!ForceStop);
  Function *Caller = Advice.getCaller();
  Function *Callee = Advice.getCallee();

  // The caller features aren't valid anymore.
  {
    PreservedAnalyses PA = PreservedAnalyses::all();
    PA.abandon<FunctionPropertiesAnalysis>();
    FAM.invalidate(*Caller, PA);
  }
  int64_t IRSizeAfter =
      getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
    ForceStop = true;

  // We can delta-update module-wide features. We know inlining only changed
  // the caller, and possibly deleted the callee.
  // Nodes are simple to update.
  // For edges, we 'forget' the edges that the caller and callee used to have
  // before inlining, and add back what they currently have together.
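  // Worked example (hypothetical numbers): if, before inlining, the caller
  // had 3 direct calls to defined functions and the callee had 2
  // (CallerAndCalleeEdges == 5), and the post-inlining caller has 4 while the
  // callee was deleted, then EdgeCount changes by 4 - 5 = -1 and NodeCount
  // drops by 1.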
  int64_t NewCallerAndCalleeEdges =
      FAM.getResult<FunctionPropertiesAnalysis>(*Caller)
          .DirectCallsToDefinedFunctions;

  if (CalleeWasDeleted)
    --NodeCount;
  else
    NewCallerAndCalleeEdges +=
        FAM.getResult<FunctionPropertiesAnalysis>(*Callee)
            .DirectCallsToDefinedFunctions;
  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}

int64_t MLInlineAdvisor::getModuleIRSize() const {
  int64_t Ret = 0;
  for (auto &F : CG->getModule())
    if (!F.isDeclaration())
      Ret += getIRSize(F);
  return Ret;
}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);

  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice,
  // which will do nothing interesting.
  // Same thing if this is a recursive case.
  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
      &Caller == &Callee)
    return getMandatoryAdvice(CB, false);

  bool Mandatory =
      MandatoryKind == InlineAdvisor::MandatoryInliningKind::Always;

  // If we need to stop, we no longer want to track any state changes, so we
  // just return the base InlineAdvice, which acts as a noop.
  if (ForceStop) {
    ORE.emit([&] {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
             << "Won't attempt inlining because module size grew too much.";
    });
    return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
  }

  int CostEstimate = 0;
  if (!Mandatory) {
    auto IsCallSiteInlinable =
        llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
    if (!IsCallSiteInlinable) {
      // We can't inline this for correctness reasons, so return the base
      // InlineAdvice, as we don't care about tracking any state changes
      // (which won't happen).
      return std::make_unique<InlineAdvice>(this, CB, ORE, false);
    }
    CostEstimate = *IsCallSiteInlinable;
  }
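  // The cost features mirror the components computed by the manual
  // inline-cost analysis; as with the estimate above, their absence means the
  // call site cannot be inlined for correctness reasons.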
  const auto CostFeatures =
      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
  if (!CostFeatures)
    return std::make_unique<InlineAdvice>(this, CB, ORE, false);

  if (Mandatory)
    return getMandatoryAdvice(CB, true);

  auto NrCtantParams = 0;
  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    NrCtantParams += (isa<Constant>(*I));
  }

  auto &CallerBefore = FAM.getResult<FunctionPropertiesAnalysis>(Caller);
  auto &CalleeBefore = FAM.getResult<FunctionPropertiesAnalysis>(Callee);

  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
      CalleeBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
      FunctionLevels[&Caller];
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerUsers) =
      CallerBefore.Uses;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CallerConditionallyExecutedBlocks) =
      CallerBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerBasicBlockCount) =
      CallerBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CalleeConditionallyExecutedBlocks) =
      CalleeBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeUsers) =
      CalleeBefore.Uses;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CostEstimate) = CostEstimate;

  // Add the cost features
  for (size_t I = 0;
       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures);
       ++I) {
    *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
  }

  return getAdviceFromModel(CB, ORE);
}

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getAdviceFromModel(CallBase &CB,
                                    OptimizationRemarkEmitter &ORE) {
  return std::make_unique<MLInlineAdvice>(
      this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));
}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
                                                                  bool Advice) {
  // Make sure we track inlinings in all cases - mandatory or not.
  if (Advice && !ForceStop)
    return getMandatoryAdviceImpl(CB);

  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice,
  // which will do nothing interesting.
  // Same if we are forced to stop - we don't track anymore.
  return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
}

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
  return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
}
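// Common remark payload: the callee's name, the current value of every model
// feature, and the final recommendation. This makes the emitted remarks
// self-describing when debugging the model's decisions.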
void MLInlineAdvice::reportContextForRemark(
    DiagnosticInfoOptimizationBase &OR) {
  using namespace ore;
  OR << NV("Callee", Callee->getName());
  for (size_t I = 0; I < NumberOfFeatures; ++I)
    OR << NV(FeatureNameMap[I],
             *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
  OR << NV("ShouldInline", isInliningRecommended());
}

void MLInlineAdvice::recordInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);
}

void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
                         Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);
}

void MLInlineAdvice::recordUnsuccessfulInliningImpl(
    const InlineResult &Result) {
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",
                               DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}

void MLInlineAdvice::recordUnattemptedInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningNotAttempted", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}
#endif // defined(LLVM_HAVE_TF_AOT) || defined(LLVM_HAVE_TF_API)