//===- BranchProbabilityInfo.cpp - Branch Probability Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "branch-prob"

static cl::opt<bool> PrintBranchProb(
    "print-bpi", cl::init(false), cl::Hidden,
    cl::desc("Print the branch probability info."));

cl::opt<std::string> PrintBranchProbFuncName(
    "print-bpi-func-name", cl::Hidden,
    cl::desc("The option to specify the name of the function "
             "whose branch probability info is printed."));

INITIALIZE_PASS_BEGIN(BranchProbabilityInfoWrapperPass, "branch-prob",
                      "Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(BranchProbabilityInfoWrapperPass, "branch-prob",
                    "Branch Probability Analysis", false, true)

BranchProbabilityInfoWrapperPass::BranchProbabilityInfoWrapperPass()
    : FunctionPass(ID) {
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

char BranchProbabilityInfoWrapperPass::ID = 0;

// Weights are for internal use only. They are used by heuristics to help
// estimate edge probabilities. Example:
//
// Using "Loop Branch Heuristics" we predict weights of edges for the
// block BB2.
//  ...
//   |
//   V
//  BB1<-+
//   |   |
//   |   | (Weight = 124)
//   V   |
//  BB2--+
//   |
//   | (Weight = 4)
//   V
//  BB3
//
// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
// Probability of the edge BB2->BB3 = 4 / (124 + 4)   = 0.03125
static const uint32_t LBH_TAKEN_WEIGHT = 124;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;

/// Unreachable-terminating branch taken probability.
///
/// This is the probability for a branch being taken to a block that terminates
/// (eventually) in unreachable. Such branches are predicted as unlikely as
/// possible. All reachable edges proportionally share the remaining
/// probability.
static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);

/// Heuristics and lookup tables for non-loop branches:
/// Pointer Heuristics (PH)
static const uint32_t PH_TAKEN_WEIGHT = 20;
static const uint32_t PH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    PtrTakenProb(PH_TAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);
static const BranchProbability
    PtrUntakenProb(PH_NONTAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);

using ProbabilityList = SmallVector<BranchProbability>;
using ProbabilityTable = std::map<CmpInst::Predicate, ProbabilityList>;

/// Pointer comparisons:
static const ProbabilityTable PointerTable{
    {ICmpInst::ICMP_NE, {PtrTakenProb, PtrUntakenProb}}, /// p != q -> Likely
    {ICmpInst::ICMP_EQ, {PtrUntakenProb, PtrTakenProb}}, /// p == q -> Unlikely
};

/// Zero Heuristics (ZH)
static const uint32_t ZH_TAKEN_WEIGHT = 20;
static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    ZeroTakenProb(ZH_TAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);
static const BranchProbability
    ZeroUntakenProb(ZH_NONTAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);

/// Integer compares with 0:
static const ProbabilityTable ICmpWithZeroTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},  /// X == 0 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},  /// X != 0 -> Likely
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X < 0 -> Unlikely
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X > 0 -> Likely
};

/// Integer compares with -1:
static const ProbabilityTable ICmpWithMinusOneTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},  /// X == -1 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},  /// X != -1 -> Likely
    // InstCombine canonicalizes X >= 0 into X > -1
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X >= 0 -> Likely
};

/// Integer compares with 1:
static const ProbabilityTable ICmpWithOneTable{
    // InstCombine canonicalizes X <= 0 into X < 1
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X <= 0 -> Unlikely
};

/// strcmp and similar functions return zero, a negative value, or a positive
/// value if the first string is equal to, less than, or greater than the
/// second. We consider it likely that the strings are not equal, so a
/// comparison with zero is probably false, and a comparison with any other
/// number is also probably false given that what exactly is returned for
/// nonzero values is not specified. We know nothing about any kind of
/// comparison other than equality.
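///
/// For illustration (hypothetical guard, names are not from this file): a
/// branch on 'strcmp(s1, s2) == 0' is predicted not taken, while a branch on
/// 'strcmp(s1, s2) != 0' is predicted taken.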
static const ProbabilityTable ICmpWithLibCallTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},
};

// Floating-Point Heuristics (FPH)
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;

/// This is the weight for an ordered floating point comparison.
static const uint32_t FPH_ORD_WEIGHT = 1024 * 1024 - 1;
/// This is the weight for an unordered floating point comparison: it means
/// that one or both of the operands are NaN. Such a comparison is usually used
/// to test for an exceptional case, so the result is unlikely.
static const uint32_t FPH_UNO_WEIGHT = 1;

static const BranchProbability FPOrdTakenProb(FPH_ORD_WEIGHT,
                                              FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPOrdUntakenProb(FPH_UNO_WEIGHT, FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPTakenProb(FPH_TAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);
static const BranchProbability
    FPUntakenProb(FPH_NONTAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);

/// Floating-Point compares:
static const ProbabilityTable FCmpTable{
    {FCmpInst::FCMP_ORD, {FPOrdTakenProb, FPOrdUntakenProb}}, /// !isnan -> Likely
    {FCmpInst::FCMP_UNO, {FPOrdUntakenProb, FPOrdTakenProb}}, /// isnan -> Unlikely
};

/// Set of dedicated "absolute" execution weights for a block. These weights are
/// meaningful relative to each other and their derivatives only.
enum class BlockExecWeight : std::uint32_t {
  /// Special weight used for cases with exact zero probability.
  ZERO = 0x0,
  /// Minimal possible non-zero weight.
  LOWEST_NON_ZERO = 0x1,
  /// Weight to an 'unreachable' block.
  UNREACHABLE = ZERO,
  /// Weight to a block containing a non-returning call.
  NORETURN = LOWEST_NON_ZERO,
  /// Weight to the 'unwind' block of an invoke instruction.
  UNWIND = LOWEST_NON_ZERO,
  /// Weight to a 'cold' block. Cold blocks are the ones containing calls marked
  /// with attribute 'cold'.
  COLD = 0xffff,
  /// Default weight is used in cases when there is no dedicated execution
  /// weight set. It is not propagated through the domination line either.
  DEFAULT = 0xfffff
};

BranchProbabilityInfo::SccInfo::SccInfo(const Function &F) {
  // Record SCC numbers of blocks in the CFG to identify irreducible loops.
  // FIXME: We could only calculate this if the CFG is known to be irreducible
  // (perhaps cache this info in LoopInfo if we can easily calculate it there?).
  int SccNum = 0;
  for (scc_iterator<const Function *> It = scc_begin(&F); !It.isAtEnd();
       ++It, ++SccNum) {
    // Ignore single-block SCCs since they either aren't loops or LoopInfo will
    // catch them.
    const std::vector<const BasicBlock *> &Scc = *It;
    if (Scc.size() == 1)
      continue;

    LLVM_DEBUG(dbgs() << "BPI: SCC " << SccNum << ":");
    for (const auto *BB : Scc) {
      LLVM_DEBUG(dbgs() << " " << BB->getName());
      SccNums[BB] = SccNum;
      calculateSccBlockType(BB, SccNum);
    }
    LLVM_DEBUG(dbgs() << "\n");
  }
}

int BranchProbabilityInfo::SccInfo::getSCCNum(const BasicBlock *BB) const {
  auto SccIt = SccNums.find(BB);
  if (SccIt == SccNums.end())
    return -1;
  return SccIt->second;
}

void BranchProbabilityInfo::SccInfo::getSccEnterBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Enters) const {

  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCHeader(BB, SccNum))
      for (const auto *Pred : predecessors(BB))
        if (getSCCNum(Pred) != SccNum)
          Enters.push_back(const_cast<BasicBlock *>(BB));
  }
}

void BranchProbabilityInfo::SccInfo::getSccExitBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Exits) const {
  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCExitingBlock(BB, SccNum))
      for (const auto *Succ : successors(BB))
        if (getSCCNum(Succ) != SccNum)
          Exits.push_back(const_cast<BasicBlock *>(Succ));
  }
}

uint32_t BranchProbabilityInfo::SccInfo::getSccBlockType(const BasicBlock *BB,
                                                         int SccNum) const {
  assert(getSCCNum(BB) == SccNum);

  assert(SccBlocks.size() > static_cast<unsigned>(SccNum) && "Unknown SCC");
  const auto &SccBlockTypes = SccBlocks[SccNum];

  auto It = SccBlockTypes.find(BB);
  if (It != SccBlockTypes.end()) {
    return It->second;
  }
  return Inner;
}

void BranchProbabilityInfo::SccInfo::calculateSccBlockType(const BasicBlock *BB,
                                                           int SccNum) {
  assert(getSCCNum(BB) == SccNum);
  uint32_t BlockType = Inner;

  if (llvm::any_of(predecessors(BB), [&](const BasicBlock *Pred) {
        // Consider any block that is an entry point to the SCC as
        // a header.
        return getSCCNum(Pred) != SccNum;
      }))
    BlockType |= Header;

  if (llvm::any_of(successors(BB), [&](const BasicBlock *Succ) {
        return getSCCNum(Succ) != SccNum;
      }))
    BlockType |= Exiting;

  // Lazily compute the set of headers for a given SCC and cache the results
  // in the SccBlocks map.
  if (SccBlocks.size() <= static_cast<unsigned>(SccNum))
    SccBlocks.resize(SccNum + 1);
  auto &SccBlockTypes = SccBlocks[SccNum];

  if (BlockType != Inner) {
    bool IsInserted;
    std::tie(std::ignore, IsInserted) =
        SccBlockTypes.insert(std::make_pair(BB, BlockType));
    assert(IsInserted && "Duplicated block in SCC");
  }
}

BranchProbabilityInfo::LoopBlock::LoopBlock(const BasicBlock *BB,
                                            const LoopInfo &LI,
                                            const SccInfo &SccI)
    : BB(BB) {
  LD.first = LI.getLoopFor(BB);
  if (!LD.first) {
    LD.second = SccI.getSCCNum(BB);
  }
}

bool BranchProbabilityInfo::isLoopEnteringEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return (DstBlock.getLoop() &&
          !DstBlock.getLoop()->contains(SrcBlock.getLoop())) ||
         // Assume that SCCs can't be nested.
         (DstBlock.getSccNum() != -1 &&
          SrcBlock.getSccNum() != DstBlock.getSccNum());
}

bool BranchProbabilityInfo::isLoopExitingEdge(const LoopEdge &Edge) const {
  return isLoopEnteringEdge({Edge.second, Edge.first});
}

bool BranchProbabilityInfo::isLoopEnteringExitingEdge(
    const LoopEdge &Edge) const {
  return isLoopEnteringEdge(Edge) || isLoopExitingEdge(Edge);
}

bool BranchProbabilityInfo::isLoopBackEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return SrcBlock.belongsToSameLoop(DstBlock) &&
         ((DstBlock.getLoop() &&
           DstBlock.getLoop()->getHeader() == DstBlock.getBlock()) ||
          (DstBlock.getSccNum() != -1 &&
           SccI->isSCCHeader(DstBlock.getBlock(), DstBlock.getSccNum())));
}

void BranchProbabilityInfo::getLoopEnterBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Enters) const {
  if (LB.getLoop()) {
    auto *Header = LB.getLoop()->getHeader();
    Enters.append(pred_begin(Header), pred_end(Header));
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccEnterBlocks(LB.getSccNum(), Enters);
  }
}

void BranchProbabilityInfo::getLoopExitBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Exits) const {
  if (LB.getLoop()) {
    LB.getLoop()->getExitBlocks(Exits);
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccExitBlocks(LB.getSccNum(), Exits);
  }
}

// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing. Examine the metadata against the unreachable
// heuristic: the probability of an edge coming to an unreachable block is set
// to the minimum of the metadata value and the unreachable heuristic.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const Instruction *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
  if (!(isa<BranchInst>(TI) || isa<SwitchInst>(TI) || isa<IndirectBrInst>(TI) ||
        isa<InvokeInst>(TI)))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
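  //
  // For illustration, for a two-successor conditional branch the consumed
  // metadata typically looks like (the weights here are arbitrary example
  // values):
  //   !prof !{!"branch_weights", i32 2000, i32 16}
  // i.e. the "branch_weights" name operand followed by one weight per
  // successor.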
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  SmallVector<unsigned, 2> UnreachableIdxs;
  SmallVector<unsigned, 2> ReachableIdxs;

  extractBranchWeights(*TI, Weights);
  for (unsigned I = 0, E = Weights.size(); I != E; ++I) {
    WeightSum += Weights[I];
    const LoopBlock SrcLoopBB = getLoopBlock(BB);
    const LoopBlock DstLoopBB = getLoopBlock(TI->getSuccessor(I));
    auto EstimatedWeight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
    if (EstimatedWeight &&
        *EstimatedWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
      UnreachableIdxs.push_back(I);
    else
      ReachableIdxs.push_back(I);
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  if (ScalingFactor > 1) {
    WeightSum = 0;
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I) {
      Weights[I] /= ScalingFactor;
      WeightSum += Weights[I];
    }
  }
  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  if (WeightSum == 0 || ReachableIdxs.size() == 0) {
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
      Weights[I] = 1;
    WeightSum = TI->getNumSuccessors();
  }

  // Set the probability.
  SmallVector<BranchProbability, 2> BP;
  for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
    BP.push_back({ Weights[I], static_cast<uint32_t>(WeightSum) });

  // Examine the metadata against the unreachable heuristic.
  // If the unreachable heuristic is stronger then we use it for this edge.
  if (UnreachableIdxs.size() == 0 || ReachableIdxs.size() == 0) {
    setEdgeProbability(BB, BP);
    return true;
  }

  auto UnreachableProb = UR_TAKEN_PROB;
  for (auto I : UnreachableIdxs)
    if (UnreachableProb < BP[I]) {
      BP[I] = UnreachableProb;
    }

  // The sum of all edge probabilities must be 1.0. If we modified the
  // probability of some edges then we must distribute the introduced
  // difference over the reachable blocks.
  //
  // Proportional distribution: the relation between probabilities of the
  // reachable edges is kept unchanged. That is, for any reachable edges i and j:
  //   newBP[i] / newBP[j] == oldBP[i] / oldBP[j] =>
  //   newBP[i] / oldBP[i] == newBP[j] / oldBP[j] == K
  // where K is independent of i,j.
  //   newBP[i] == oldBP[i] * K
  // We need to find K.
  // Sum over all reachable edges on both sides:
  //   sum_of_reachable(newBP) == K * sum_of_reachable(oldBP)
  // The sum of newBP must be equal to 1.0:
  //   sum_of_reachable(newBP) + sum_of_unreachable(newBP) == 1.0 =>
  //   sum_of_reachable(newBP) = 1.0 - sum_of_unreachable(newBP)
  // where sum_of_unreachable(newBP) is what has just been changed.
  // Finally:
  //   K == sum_of_reachable(newBP) / sum_of_reachable(oldBP) =>
  //   K == (1.0 - sum_of_unreachable(newBP)) / sum_of_reachable(oldBP)
  BranchProbability NewUnreachableSum = BranchProbability::getZero();
  for (auto I : UnreachableIdxs)
    NewUnreachableSum += BP[I];

  BranchProbability NewReachableSum =
      BranchProbability::getOne() - NewUnreachableSum;

  BranchProbability OldReachableSum = BranchProbability::getZero();
  for (auto I : ReachableIdxs)
    OldReachableSum += BP[I];

  if (OldReachableSum != NewReachableSum) { // Anything to distribute?
    if (OldReachableSum.isZero()) {
      // If all oldBP[i] are zeroes then the proportional distribution results
      // in all zero probabilities and the error stays big. In this case we
      // evenly spread NewReachableSum over the reachable edges.
      BranchProbability PerEdge = NewReachableSum / ReachableIdxs.size();
      for (auto I : ReachableIdxs)
        BP[I] = PerEdge;
    } else {
      for (auto I : ReachableIdxs) {
        // We use uint64_t to avoid double rounding error of the following
        // calculation: BP[i] = BP[i] * NewReachableSum / OldReachableSum
        // The formula is taken from the private constructor
        // BranchProbability(uint32_t Numerator, uint32_t Denominator)
        uint64_t Mul = static_cast<uint64_t>(NewReachableSum.getNumerator()) *
                       BP[I].getNumerator();
        uint32_t Div = static_cast<uint32_t>(
            divideNearest(Mul, OldReachableSum.getNumerator()));
        BP[I] = BranchProbability::getRaw(Div);
      }
    }
  }

  setEdgeProbability(BB, BP);

  return true;
}

// Calculate edge weights using "Pointer Heuristics". Predict that a comparison
// between two pointers, or between a pointer and NULL, will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI || !CI->isEquality())
    return false;

  Value *LHS = CI->getOperand(0);

  if (!LHS->getType()->isPointerTy())
    return false;

  assert(CI->getOperand(1)->getType()->isPointerTy());

  auto Search = PointerTable.find(CI->getPredicate());
  if (Search == PointerTable.end())
    return false;
  setEdgeProbability(BB, Search->second);
  return true;
}

// Compute the unlikely successors to the block BB in the loop L, specifically
// those that are unlikely because this is a loop, and add them to the
// UnlikelyBlocks set.
static void
computeUnlikelySuccessors(const BasicBlock *BB, Loop *L,
                          SmallPtrSetImpl<const BasicBlock*> &UnlikelyBlocks) {
  // Sometimes in a loop we have a branch whose condition is made false by
  // taking it. This is typically something like
  //  int n = 0;
  //  while (...) {
  //    if (++n >= MAX) {
  //      n = 0;
  //    }
  //  }
  // In this sort of situation taking the branch means that at the very least it
  // won't be taken again in the next iteration of the loop, so we should
  // consider it less likely than a typical branch.
  //
  // We detect this by looking back through the graph of PHI nodes that set the
  // value that the condition depends on, and seeing if we can reach a successor
  // block which can be determined to make the condition false.
  //
  // FIXME: We currently consider unlikely blocks to be half as likely as other
  // blocks, but if we consider the example above the likelihood is actually
  // 1/MAX. We could therefore be more precise in how unlikely we consider
  // blocks to be, but it would require more careful examination of the form
  // of the comparison expression.
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return;

  // Check if the branch is based on an instruction compared with a constant.
  CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
  if (!CI || !isa<Instruction>(CI->getOperand(0)) ||
      !isa<Constant>(CI->getOperand(1)))
    return;

  // Either the instruction must be a PHI, or a chain of operations involving
  // constants that ends in a PHI which we can then collapse into a single value
  // if the PHI value is known.
  Instruction *CmpLHS = dyn_cast<Instruction>(CI->getOperand(0));
  PHINode *CmpPHI = dyn_cast<PHINode>(CmpLHS);
  Constant *CmpConst = dyn_cast<Constant>(CI->getOperand(1));
  // Collect the instructions until we hit a PHI.
  SmallVector<BinaryOperator *, 1> InstChain;
  while (!CmpPHI && CmpLHS && isa<BinaryOperator>(CmpLHS) &&
         isa<Constant>(CmpLHS->getOperand(1))) {
    // Stop if the chain extends outside of the loop.
    if (!L->contains(CmpLHS))
      return;
    InstChain.push_back(cast<BinaryOperator>(CmpLHS));
    CmpLHS = dyn_cast<Instruction>(CmpLHS->getOperand(0));
    if (CmpLHS)
      CmpPHI = dyn_cast<PHINode>(CmpLHS);
  }
  if (!CmpPHI || !L->contains(CmpPHI))
    return;

  // Trace the phi node to find all values that come from successors of BB.
  SmallPtrSet<PHINode*, 8> VisitedInsts;
  SmallVector<PHINode*, 8> WorkList;
  WorkList.push_back(CmpPHI);
  VisitedInsts.insert(CmpPHI);
  while (!WorkList.empty()) {
    PHINode *P = WorkList.pop_back_val();
    for (BasicBlock *B : P->blocks()) {
      // Skip blocks that aren't part of the loop.
      if (!L->contains(B))
        continue;
      Value *V = P->getIncomingValueForBlock(B);
      // If the source is a PHI add it to the work list if we haven't
      // already visited it.
      if (PHINode *PN = dyn_cast<PHINode>(V)) {
        if (VisitedInsts.insert(PN).second)
          WorkList.push_back(PN);
        continue;
      }
      // If this incoming value is a constant and B is a successor of BB, then
      // we can constant-evaluate the compare to see if it makes the branch be
      // taken or not.
      Constant *CmpLHSConst = dyn_cast<Constant>(V);
      if (!CmpLHSConst || !llvm::is_contained(successors(BB), B))
        continue;
      // First collapse InstChain.
      const DataLayout &DL = BB->getModule()->getDataLayout();
      for (Instruction *I : llvm::reverse(InstChain)) {
        CmpLHSConst = ConstantFoldBinaryOpOperands(
            I->getOpcode(), CmpLHSConst, cast<Constant>(I->getOperand(1)), DL);
        if (!CmpLHSConst)
          break;
      }
      if (!CmpLHSConst)
        continue;
      // Now constant-evaluate the compare.
      Constant *Result = ConstantExpr::getCompare(CI->getPredicate(),
                                                  CmpLHSConst, CmpConst, true);
      // If the result means we don't branch to the block then that block is
      // unlikely.
      if (Result &&
          ((Result->isZeroValue() && B == BI->getSuccessor(0)) ||
           (Result->isOneValue() && B == BI->getSuccessor(1))))
        UnlikelyBlocks.insert(B);
    }
  }
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedBlockWeight(const BasicBlock *BB) const {
  auto WeightIt = EstimatedBlockWeight.find(BB);
  if (WeightIt == EstimatedBlockWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedLoopWeight(const LoopData &L) const {
  auto WeightIt = EstimatedLoopWeight.find(L);
  if (WeightIt == EstimatedLoopWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedEdgeWeight(const LoopEdge &Edge) const {
  // For edges entering a loop take the weight of the loop rather than that of
  // an individual block in the loop.
  return isLoopEnteringEdge(Edge)
             ? getEstimatedLoopWeight(Edge.second.getLoopData())
             : getEstimatedBlockWeight(Edge.second.getBlock());
}

template <class IterT>
Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
    const LoopBlock &SrcLoopBB, iterator_range<IterT> Successors) const {
  SmallVector<uint32_t, 4> Weights;
  Optional<uint32_t> MaxWeight;
  for (const BasicBlock *DstBB : Successors) {
    const LoopBlock DstLoopBB = getLoopBlock(DstBB);
    auto Weight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});

    if (!Weight)
      return None;

    if (!MaxWeight || *MaxWeight < *Weight)
      MaxWeight = Weight;
  }

  return MaxWeight;
}

// Updates \p LoopBB's weight and returns true. If \p LoopBB already has an
// associated weight it is unchanged and false is returned.
//
// Please note that by the algorithm the weight is not expected to change once
// set; thus the 'false' status is used to track visited blocks.
bool BranchProbabilityInfo::updateEstimatedBlockWeight(
    LoopBlock &LoopBB, uint32_t BBWeight,
    SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  BasicBlock *BB = LoopBB.getBlock();

  // In general, a weight is assigned to a block when it has a final value and
  // can't/shouldn't be changed. However, there are cases when a block
  // inherently has several (possibly "contradicting") weights. For example, an
  // "unwind" block may also contain a "cold" call. In that case the first
  // weight set is favored and all subsequent weights are ignored.
  if (!EstimatedBlockWeight.insert({BB, BBWeight}).second)
    return false;

  for (BasicBlock *PredBlock : predecessors(BB)) {
    LoopBlock PredLoop = getLoopBlock(PredBlock);
    // Add the affected block/loop to a work list.
    if (isLoopExitingEdge({PredLoop, LoopBB})) {
      if (!EstimatedLoopWeight.count(PredLoop.getLoopData()))
        LoopWorkList.push_back(PredLoop);
    } else if (!EstimatedBlockWeight.count(PredBlock))
      BlockWorkList.push_back(PredBlock);
  }
  return true;
}

// Starting from \p BB, traverse through the dominator blocks and assign
// \p BBWeight to all blocks that are post-dominated by \p BB, in other words,
// to all blocks that are executed if and only if \p BB is executed.
// Importantly, we skip loops here for two reasons. First, weights of blocks in
// a loop should be scaled by the trip count (which is possibly unknown).
// Second, there is no value in doing so because it does not give any
// additional information about the distribution of probabilities inside the
// loop. The exception is loop 'enter' and 'exit' edges, which are handled in a
// special way in calcEstimatedHeuristics.
//
// In addition, the work lists are populated with basic blocks if at least one
// successor has an updated estimated weight.
void BranchProbabilityInfo::propagateEstimatedBlockWeight(
    const LoopBlock &LoopBB, DominatorTree *DT, PostDominatorTree *PDT,
    uint32_t BBWeight, SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  const BasicBlock *BB = LoopBB.getBlock();
  const auto *DTStartNode = DT->getNode(BB);
  const auto *PDTStartNode = PDT->getNode(BB);

  // TODO: Consider propagating weight down the domination line as well.
  for (const auto *DTNode = DTStartNode; DTNode != nullptr;
       DTNode = DTNode->getIDom()) {
    auto *DomBB = DTNode->getBlock();
    // Consider blocks which lie on one 'line'.
    if (!PDT->dominates(PDTStartNode, PDT->getNode(DomBB)))
      // If BB doesn't post-dominate DomBB it will not post-dominate the
      // dominators of DomBB either.
      break;

    LoopBlock DomLoopBB = getLoopBlock(DomBB);
    const LoopEdge Edge{DomLoopBB, LoopBB};
    // Don't propagate weight to blocks belonging to different loops.
    if (!isLoopEnteringExitingEdge(Edge)) {
      if (!updateEstimatedBlockWeight(DomLoopBB, BBWeight, BlockWorkList,
                                      LoopWorkList))
        // If DomBB has its weight set then all its predecessors are already
        // processed (since we propagate the weight up to the top of the IR
        // each time).
        break;
    } else if (isLoopExitingEdge(Edge)) {
      LoopWorkList.push_back(DomLoopBB);
    }
  }
}

Optional<uint32_t> BranchProbabilityInfo::getInitialEstimatedBlockWeight(
    const BasicBlock *BB) {
  // Returns true if \p BB has a call marked with the "NoReturn" attribute.
  auto hasNoReturn = [&](const BasicBlock *BB) {
    for (const auto &I : reverse(*BB))
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (CI->hasFnAttr(Attribute::NoReturn))
          return true;

    return false;
  };

  // Important note regarding the order of checks. They are ordered by weight
  // from lowest to highest. Doing so helps to avoid "unstable" results when
  // several heuristics could apply simultaneously.
  if (isa<UnreachableInst>(BB->getTerminator()) ||
      // If this block is terminated by a call to
      // @llvm.experimental.deoptimize then treat it like an unreachable
      // since it is expected to practically never execute.
      // TODO: Should we actually treat it as a never-returning call?
      BB->getTerminatingDeoptimizeCall())
    return hasNoReturn(BB)
               ? static_cast<uint32_t>(BlockExecWeight::NORETURN)
               : static_cast<uint32_t>(BlockExecWeight::UNREACHABLE);

  // Check if the block is the 'unwind' handler of some invoke instruction.
  for (const auto *Pred : predecessors(BB))
    if (Pred)
      if (const auto *II = dyn_cast<InvokeInst>(Pred->getTerminator()))
        if (II->getUnwindDest() == BB)
          return static_cast<uint32_t>(BlockExecWeight::UNWIND);

  // Check if the block contains a 'cold' call.
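  // (For illustration: a call to a function declared with the 'cold'
  // attribute, e.g. __attribute__((cold)) in C/C++, carries Attribute::Cold.)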
  for (const auto &I : *BB)
    if (const CallInst *CI = dyn_cast<CallInst>(&I))
      if (CI->hasFnAttr(Attribute::Cold))
        return static_cast<uint32_t>(BlockExecWeight::COLD);

  return None;
}

// Does an RPO traversal over all blocks in \p F and assigns weights to
// 'unreachable', 'noreturn', 'cold', and 'unwind' blocks. In addition it does
// its best to propagate the weights up/down the IR.
void BranchProbabilityInfo::computeEestimateBlockWeight(
    const Function &F, DominatorTree *DT, PostDominatorTree *PDT) {
  SmallVector<BasicBlock *, 8> BlockWorkList;
  SmallVector<LoopBlock, 8> LoopWorkList;

  // By doing RPO we make sure that all predecessors already have weights
  // calculated before visiting their successors.
  ReversePostOrderTraversal<const Function *> RPOT(&F);
  for (const auto *BB : RPOT)
    if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
      // If we were able to find an estimated weight for the block, set it and
      // propagate it up the IR.
      propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, BBWeight.value(),
                                    BlockWorkList, LoopWorkList);

  // BlockWorkList/LoopWorkList contain blocks/loops with at least one
  // successor/exit having an estimated weight. Try to propagate the weight to
  // such blocks/loops from their successors/exits.
  // Process loops and blocks. Order is not important.
  do {
    while (!LoopWorkList.empty()) {
      const LoopBlock LoopBB = LoopWorkList.pop_back_val();

      if (EstimatedLoopWeight.count(LoopBB.getLoopData()))
        continue;

      SmallVector<BasicBlock *, 4> Exits;
      getLoopExitBlocks(LoopBB, Exits);
      auto LoopWeight = getMaxEstimatedEdgeWeight(
          LoopBB, make_range(Exits.begin(), Exits.end()));

      if (LoopWeight) {
        // If we never exit the loop then we can enter it once at maximum.
        if (LoopWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
          LoopWeight = static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);

        EstimatedLoopWeight.insert({LoopBB.getLoopData(), *LoopWeight});
        // Add all blocks entering the loop into the work list.
        getLoopEnterBlocks(LoopBB, BlockWorkList);
      }
    }

    while (!BlockWorkList.empty()) {
      // We can reach here only if BlockWorkList is not empty.
      const BasicBlock *BB = BlockWorkList.pop_back_val();
      if (EstimatedBlockWeight.count(BB))
        continue;

      // We take the maximum over all weights of successors. In other words we
      // take the weight of the "hot" path. In theory we could probably find a
      // better function which gives higher accuracy results (compared to
      // "maximum") but I can't think of any right now. And I doubt it will
      // make any difference in practice.
      const LoopBlock LoopBB = getLoopBlock(BB);
      auto MaxWeight = getMaxEstimatedEdgeWeight(LoopBB, successors(BB));

      if (MaxWeight)
        propagateEstimatedBlockWeight(LoopBB, DT, PDT, *MaxWeight,
                                      BlockWorkList, LoopWorkList);
    }
  } while (!BlockWorkList.empty() || !LoopWorkList.empty());
}

// Calculate edge probabilities based on the block's estimated weight.
// Note that the gathered weights were not scaled for loops. Thus edges entering
// and exiting loops require special processing.
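//
// For illustration (numbers derived from the constants above, not from any
// particular input): the assumed trip count is LBH_TAKEN_WEIGHT /
// LBH_NONTAKEN_WEIGHT = 124 / 4 = 31, so a loop-exiting successor whose
// estimated weight would otherwise be DEFAULT (0xfffff) is scaled down to
// 0xfffff / 31 before probabilities are computed.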
bool BranchProbabilityInfo::calcEstimatedHeuristics(const BasicBlock *BB) {
  assert(BB->getTerminator()->getNumSuccessors() > 1 &&
         "expected more than one successor!");

  const LoopBlock LoopBB = getLoopBlock(BB);

  SmallPtrSet<const BasicBlock *, 8> UnlikelyBlocks;
  uint32_t TC = LBH_TAKEN_WEIGHT / LBH_NONTAKEN_WEIGHT;
  if (LoopBB.getLoop())
    computeUnlikelySuccessors(BB, LoopBB.getLoop(), UnlikelyBlocks);

  // Changed to 'true' if at least one successor has an estimated weight.
  bool FoundEstimatedWeight = false;
  SmallVector<uint32_t, 4> SuccWeights;
  uint64_t TotalWeight = 0;
  // Go over all successors of BB and put their weights into SuccWeights.
  for (const BasicBlock *SuccBB : successors(BB)) {
    Optional<uint32_t> Weight;
    const LoopBlock SuccLoopBB = getLoopBlock(SuccBB);
    const LoopEdge Edge{LoopBB, SuccLoopBB};

    Weight = getEstimatedEdgeWeight(Edge);

    if (isLoopExitingEdge(Edge) &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // Scale down the loop-exiting weight by the trip count.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) /
              TC);
    }
    bool IsUnlikelyEdge = LoopBB.getLoop() && UnlikelyBlocks.contains(SuccBB);
    if (IsUnlikelyEdge &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // 'Unlikely' blocks have half the weight.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) / 2);
    }

    if (Weight)
      FoundEstimatedWeight = true;

    auto WeightVal =
        Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT));
    TotalWeight += WeightVal;
    SuccWeights.push_back(WeightVal);
  }

  // If none of the blocks have an estimated weight, bail out.
  // If TotalWeight is 0 that means the weight of each successor is 0 as well
  // and they are equally likely. Bail out early to not deal with division by
  // zero.
  if (!FoundEstimatedWeight || TotalWeight == 0)
    return false;

  assert(SuccWeights.size() == succ_size(BB) && "Missed successor?");
  const unsigned SuccCount = SuccWeights.size();

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  if (TotalWeight > UINT32_MAX) {
    uint64_t ScalingFactor = TotalWeight / UINT32_MAX + 1;
    TotalWeight = 0;
    for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
      SuccWeights[Idx] /= ScalingFactor;
      if (SuccWeights[Idx] == static_cast<uint32_t>(BlockExecWeight::ZERO))
        SuccWeights[Idx] =
            static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);
      TotalWeight += SuccWeights[Idx];
    }
    assert(TotalWeight <= UINT32_MAX && "Total weight overflows");
  }

  // Finally set probabilities to edges according to estimated block weights.
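  // For example, successor weights {2, 1} become edge probabilities
  // {2/3, 1/3} (illustrative values).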
  SmallVector<BranchProbability, 4> EdgeProbabilities(
      SuccCount, BranchProbability::getUnknown());

  for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
    EdgeProbabilities[Idx] =
        BranchProbability(SuccWeights[Idx], (uint32_t)TotalWeight);
  }
  setEdgeProbability(BB, EdgeProbabilities);
  return true;
}

bool BranchProbabilityInfo::calcZeroHeuristics(const BasicBlock *BB,
                                               const TargetLibraryInfo *TLI) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI)
    return false;

  auto GetConstantInt = [](Value *V) {
    if (auto *I = dyn_cast<BitCastInst>(V))
      return dyn_cast<ConstantInt>(I->getOperand(0));
    return dyn_cast<ConstantInt>(V);
  };

  Value *RHS = CI->getOperand(1);
  ConstantInt *CV = GetConstantInt(RHS);
  if (!CV)
    return false;

  // If the LHS is the result of AND'ing a value with a single bit bitmask,
  // we don't have information about probabilities.
  if (Instruction *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
    if (LHS->getOpcode() == Instruction::And)
      if (ConstantInt *AndRHS = GetConstantInt(LHS->getOperand(1)))
        if (AndRHS->getValue().isPowerOf2())
          return false;

  // Check if the LHS is the return value of a library function
  LibFunc Func = NumLibFuncs;
  if (TLI)
    if (CallInst *Call = dyn_cast<CallInst>(CI->getOperand(0)))
      if (Function *CalledFn = Call->getCalledFunction())
        TLI->getLibFunc(*CalledFn, Func);

  ProbabilityTable::const_iterator Search;
  if (Func == LibFunc_strcasecmp ||
      Func == LibFunc_strcmp ||
      Func == LibFunc_strncasecmp ||
      Func == LibFunc_strncmp ||
      Func == LibFunc_memcmp ||
      Func == LibFunc_bcmp) {
    Search = ICmpWithLibCallTable.find(CI->getPredicate());
    if (Search == ICmpWithLibCallTable.end())
      return false;
  } else if (CV->isZero()) {
    Search = ICmpWithZeroTable.find(CI->getPredicate());
    if (Search == ICmpWithZeroTable.end())
      return false;
  } else if (CV->isOne()) {
    Search = ICmpWithOneTable.find(CI->getPredicate());
    if (Search == ICmpWithOneTable.end())
      return false;
  } else if (CV->isMinusOne()) {
    Search = ICmpWithMinusOneTable.find(CI->getPredicate());
    if (Search == ICmpWithMinusOneTable.end())
      return false;
  } else {
    return false;
  }

  setEdgeProbability(BB, Search->second);
  return true;
}

bool BranchProbabilityInfo::calcFloatingPointHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
  if (!FCmp)
    return false;

  ProbabilityList ProbList;
  if (FCmp->isEquality()) {
    ProbList = !FCmp->isTrueWhenEqual() ?
        // f1 == f2 -> Unlikely
        ProbabilityList({FPTakenProb, FPUntakenProb}) :
        // f1 != f2 -> Likely
        ProbabilityList({FPUntakenProb, FPTakenProb});
  } else {
    auto Search = FCmpTable.find(FCmp->getPredicate());
    if (Search == FCmpTable.end())
      return false;
    ProbList = Search->second;
  }

  setEdgeProbability(BB, ProbList);
  return true;
}

void BranchProbabilityInfo::releaseMemory() {
  Probs.clear();
  Handles.clear();
}

bool BranchProbabilityInfo::invalidate(Function &, const PreservedAnalyses &PA,
                                       FunctionAnalysisManager::Invalidator &) {
  // Check whether the analysis, all analyses on functions, or the function's
  // CFG have been preserved.
  auto PAC = PA.getChecker<BranchProbabilityAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
           PAC.preservedSet<CFGAnalyses>());
}

void BranchProbabilityInfo::print(raw_ostream &OS) const {
  OS << "---- Branch Probabilities ----\n";
  // We print the probabilities from the last function the analysis ran over,
  // or the function it is currently running over.
  assert(LastF && "Cannot print prior to running over a function");
  for (const auto &BI : *LastF) {
    for (const BasicBlock *Succ : successors(&BI))
      printEdgeProbability(OS << "  ", &BI, Succ);
  }
}

bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
  // Hot probability is at least 4/5 = 80%
  // FIXME: Compare against a static "hot" BranchProbability.
  return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}

/// Get the raw edge probability for the edge. If we can't find it, return a
/// default probability 1/N where N is the number of successors. Here an edge
/// is specified using PredBlock and an index to its successors.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          unsigned IndexInSuccessors) const {
  auto I = Probs.find(std::make_pair(Src, IndexInSuccessors));
  assert((Probs.end() == Probs.find(std::make_pair(Src, 0))) ==
             (Probs.end() == I) &&
         "Probability for I-th successor must always be defined along with the "
         "probability for the first successor");

  if (I != Probs.end())
    return I->second;

  return {1, static_cast<uint32_t>(succ_size(Src))};
}

BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const_succ_iterator Dst) const {
  return getEdgeProbability(Src, Dst.getSuccessorIndex());
}

/// Get the raw edge probability calculated for the block pair. This returns
/// the sum of all raw edge probabilities from Src to Dst.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const BasicBlock *Dst) const {
  if (!Probs.count(std::make_pair(Src, 0)))
    return BranchProbability(llvm::count(successors(Src), Dst), succ_size(Src));

  auto Prob = BranchProbability::getZero();
  for (const_succ_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
    if (*I == Dst)
      Prob += Probs.find(std::make_pair(Src, I.getSuccessorIndex()))->second;

  return Prob;
}

/// Set the edge probability for all edges at once.
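/// For example, for a block ending in a conditional branch a caller might pass
/// {BranchProbability(3, 4), BranchProbability(1, 4)} for its two successors
/// (illustrative values; the vector size must match the successor count).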
void BranchProbabilityInfo::setEdgeProbability(
    const BasicBlock *Src, const SmallVectorImpl<BranchProbability> &Probs) {
  assert(Src->getTerminator()->getNumSuccessors() == Probs.size());
  eraseBlock(Src); // Erase stale data if any.
  if (Probs.size() == 0)
    return; // Nothing to set.

  Handles.insert(BasicBlockCallbackVH(Src, this));
  uint64_t TotalNumerator = 0;
  for (unsigned SuccIdx = 0; SuccIdx < Probs.size(); ++SuccIdx) {
    this->Probs[std::make_pair(Src, SuccIdx)] = Probs[SuccIdx];
    LLVM_DEBUG(dbgs() << "set edge " << Src->getName() << " -> " << SuccIdx
                      << " successor probability to " << Probs[SuccIdx]
                      << "\n");
    TotalNumerator += Probs[SuccIdx].getNumerator();
  }

  // Because of rounding errors the total probability cannot be checked to be
  // 1.0 exactly. That is, we cannot check that
  // TotalNumerator == BranchProbability::getDenominator(). Instead, every
  // single probability in Probs must be as accurate as possible. This results
  // in an error of 1/denominator at most; thus the total absolute error should
  // be within Probs.size() / BranchProbability::getDenominator().
  assert(TotalNumerator <= BranchProbability::getDenominator() + Probs.size());
  assert(TotalNumerator >= BranchProbability::getDenominator() - Probs.size());
  (void)TotalNumerator;
}

void BranchProbabilityInfo::copyEdgeProbabilities(BasicBlock *Src,
                                                  BasicBlock *Dst) {
  eraseBlock(Dst); // Erase stale data if any.
  unsigned NumSuccessors = Src->getTerminator()->getNumSuccessors();
  assert(NumSuccessors == Dst->getTerminator()->getNumSuccessors());
  if (NumSuccessors == 0)
    return; // Nothing to set.
  if (this->Probs.find(std::make_pair(Src, 0)) == this->Probs.end())
    return; // No probability is set for edges from Src. Keep the same for Dst.

  Handles.insert(BasicBlockCallbackVH(Dst, this));
  for (unsigned SuccIdx = 0; SuccIdx < NumSuccessors; ++SuccIdx) {
    auto Prob = this->Probs[std::make_pair(Src, SuccIdx)];
    this->Probs[std::make_pair(Dst, SuccIdx)] = Prob;
    LLVM_DEBUG(dbgs() << "set edge " << Dst->getName() << " -> " << SuccIdx
                      << " successor probability to " << Prob << "\n");
  }
}

raw_ostream &
BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
                                            const BasicBlock *Src,
                                            const BasicBlock *Dst) const {
  const BranchProbability Prob = getEdgeProbability(Src, Dst);
  OS << "edge " << Src->getName() << " -> " << Dst->getName()
     << " probability is " << Prob
     << (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");

  return OS;
}

void BranchProbabilityInfo::eraseBlock(const BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "eraseBlock " << BB->getName() << "\n");

  // Note that we cannot use successors of BB because the terminator of BB may
  // have changed when eraseBlock is called as a BasicBlockCallbackVH callback.
  // Instead we remove prob data for the block by iterating successors by their
  // indices from 0 until the last one that exists. There cannot be prob data
  // for a pair (BB, N) if there is no data for (BB, N-1), because the data is
  // always set for all successors from 0 to M at once by the method
  // setEdgeProbability().
  Handles.erase(BasicBlockCallbackVH(BB, this));
  for (unsigned I = 0;; ++I) {
    auto MapI = Probs.find(std::make_pair(BB, I));
    if (MapI == Probs.end()) {
      assert(Probs.count(std::make_pair(BB, I + 1)) == 0 &&
             "Must be no more successors");
      return;
    }
    Probs.erase(MapI);
  }
}

void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LoopI,
                                      const TargetLibraryInfo *TLI,
                                      DominatorTree *DT,
                                      PostDominatorTree *PDT) {
  LLVM_DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
                    << " ----\n\n");
  LastF = &F; // Store the last function we ran on for printing.
  LI = &LoopI;

  SccI = std::make_unique<SccInfo>(F);

  assert(EstimatedBlockWeight.empty());
  assert(EstimatedLoopWeight.empty());

  std::unique_ptr<DominatorTree> DTPtr;
  std::unique_ptr<PostDominatorTree> PDTPtr;

  if (!DT) {
    DTPtr = std::make_unique<DominatorTree>(const_cast<Function &>(F));
    DT = DTPtr.get();
  }

  if (!PDT) {
    PDTPtr = std::make_unique<PostDominatorTree>(const_cast<Function &>(F));
    PDT = PDTPtr.get();
  }

  computeEestimateBlockWeight(F, DT, PDT);

  // Walk the basic blocks in post-order so that we can build up state about
  // the successors of a block iteratively.
  for (const auto *BB : post_order(&F.getEntryBlock())) {
    LLVM_DEBUG(dbgs() << "Computing probabilities for " << BB->getName()
                      << "\n");
    // If there are fewer than two successors, there is no sense in setting a
    // probability.
    if (BB->getTerminator()->getNumSuccessors() < 2)
      continue;
    if (calcMetadataWeights(BB))
      continue;
    if (calcEstimatedHeuristics(BB))
      continue;
    if (calcPointerHeuristics(BB))
      continue;
    if (calcZeroHeuristics(BB, TLI))
      continue;
    if (calcFloatingPointHeuristics(BB))
      continue;
  }

  EstimatedLoopWeight.clear();
  EstimatedBlockWeight.clear();
  SccI.reset();

  if (PrintBranchProb &&
      (PrintBranchProbFuncName.empty() ||
       F.getName().equals(PrintBranchProbFuncName))) {
    print(dbgs());
  }
}

void BranchProbabilityInfoWrapperPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  // We require DT so it's available when LI is available. The LI updating code
  // asserts that DT is also present so if we don't make sure that we have DT
  // here, that assert will trigger.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<PostDominatorTreeWrapperPass>();
  AU.setPreservesAll();
}

bool BranchProbabilityInfoWrapperPass::runOnFunction(Function &F) {
  const LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  PostDominatorTree &PDT =
      getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  BPI.calculate(F, LI, &TLI, &DT, &PDT);
  return false;
}

void BranchProbabilityInfoWrapperPass::releaseMemory() { BPI.releaseMemory(); }

void BranchProbabilityInfoWrapperPass::print(raw_ostream &OS,
                                             const Module *) const {
  BPI.print(OS);
}

AnalysisKey BranchProbabilityAnalysis::Key;
BranchProbabilityInfo
BranchProbabilityAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  BranchProbabilityInfo BPI;
  BPI.calculate(F, AM.getResult<LoopAnalysis>(F),
                &AM.getResult<TargetLibraryAnalysis>(F),
                &AM.getResult<DominatorTreeAnalysis>(F),
                &AM.getResult<PostDominatorTreeAnalysis>(F));
  return BPI;
}

PreservedAnalyses
BranchProbabilityPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  OS << "Printing analysis results of BPI for function "
     << "'" << F.getName() << "':"
     << "\n";
  AM.getResult<BranchProbabilityAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}