//===- BranchProbabilityInfo.cpp - Branch Probability Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "branch-prob"

static cl::opt<bool> PrintBranchProb(
    "print-bpi", cl::init(false), cl::Hidden,
    cl::desc("Print the branch probability info."));

cl::opt<std::string> PrintBranchProbFuncName(
    "print-bpi-func-name", cl::Hidden,
    cl::desc("The option to specify the name of the function "
             "whose branch probability info is printed."));

INITIALIZE_PASS_BEGIN(BranchProbabilityInfoWrapperPass, "branch-prob",
                      "Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(BranchProbabilityInfoWrapperPass, "branch-prob",
                    "Branch Probability Analysis", false, true)

BranchProbabilityInfoWrapperPass::BranchProbabilityInfoWrapperPass()
    : FunctionPass(ID) {
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

char BranchProbabilityInfoWrapperPass::ID = 0;

// Weights are for internal use only. They are used by heuristics to help to
// estimate edges' probability. Example:
//
// Using "Loop Branch Heuristics" we predict weights of edges for the
// block BB2.
//  ...
//   |
//   V
//  BB1<-+
//   |   |
//   |   | (Weight = 124)
//   V   |
//  BB2--+
//   |
//   | (Weight = 4)
//   V
//  BB3
//
// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
// Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
static const uint32_t LBH_TAKEN_WEIGHT = 124;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;

/// Unreachable-terminating branch taken probability.
///
/// This is the probability for a branch being taken to a block that terminates
/// (eventually) in unreachable. These are predicted as unlikely as possible.
/// All reachable probability will proportionally share the remaining part.
static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);
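// Note: BranchProbability uses a fixed denominator of 1 << 31, so getRaw(1)
// above corresponds to roughly 4.7e-10: effectively "never taken", while still
// non-zero so the remaining probability can be shared proportionally among the
// reachable successors.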

/// Heuristics and lookup tables for non-loop branches:
/// Pointer Heuristics (PH)
static const uint32_t PH_TAKEN_WEIGHT = 20;
static const uint32_t PH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    PtrTakenProb(PH_TAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);
static const BranchProbability
    PtrUntakenProb(PH_NONTAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);

using ProbabilityList = SmallVector<BranchProbability>;
using ProbabilityTable = std::map<CmpInst::Predicate, ProbabilityList>;

/// Pointer comparisons:
static const ProbabilityTable PointerTable{
    {ICmpInst::ICMP_NE, {PtrTakenProb, PtrUntakenProb}}, /// p != q -> Likely
    {ICmpInst::ICMP_EQ, {PtrUntakenProb, PtrTakenProb}}, /// p == q -> Unlikely
};

/// Zero Heuristics (ZH)
static const uint32_t ZH_TAKEN_WEIGHT = 20;
static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    ZeroTakenProb(ZH_TAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);
static const BranchProbability
    ZeroUntakenProb(ZH_NONTAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);

/// Integer compares with 0:
static const ProbabilityTable ICmpWithZeroTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},  /// X == 0 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},  /// X != 0 -> Likely
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X < 0 -> Unlikely
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X > 0 -> Likely
};

/// Integer compares with -1:
static const ProbabilityTable ICmpWithMinusOneTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}}, /// X == -1 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}}, /// X != -1 -> Likely
    // InstCombine canonicalizes X >= 0 into X > -1
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X >= 0 -> Likely
};

/// Integer compares with 1:
static const ProbabilityTable ICmpWithOneTable{
    // InstCombine canonicalizes X <= 0 into X < 1
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X <= 0 -> Unlikely
};

/// strcmp and similar functions return zero, a negative, or a positive value
/// depending on whether the first string is equal to, less than, or greater
/// than the second. We consider it likely that the strings are not equal, so a
/// comparison with zero is probably false; a comparison with any other number
/// is also probably false, given that what exactly is returned for nonzero
/// values is not specified. About any kind of comparison other than equality
/// we know nothing.
static const ProbabilityTable ICmpWithLibCallTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},
};
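// For example, a branch on `strcmp(a, b) == 0` gets a taken probability of
// 12/32 (37.5%), while a branch on `strcmp(a, b) != 0` gets 20/32 (62.5%),
// reflecting the assumption that the compared strings usually differ.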

// Floating-Point Heuristics (FPH)
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;

/// This is the weight for an ordered floating point comparison.
static const uint32_t FPH_ORD_WEIGHT = 1024 * 1024 - 1;
/// This is the weight for an unordered floating point comparison, meaning one
/// or both of the operands are NaN. Usually it is used to test for an
/// exceptional case, so the result is unlikely.
static const uint32_t FPH_UNO_WEIGHT = 1;

static const BranchProbability FPOrdTakenProb(FPH_ORD_WEIGHT,
                                              FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPOrdUntakenProb(FPH_UNO_WEIGHT, FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPTakenProb(FPH_TAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);
static const BranchProbability
    FPUntakenProb(FPH_NONTAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);

/// Floating-Point compares:
static const ProbabilityTable FCmpTable{
    {FCmpInst::FCMP_ORD, {FPOrdTakenProb, FPOrdUntakenProb}}, /// !isnan -> Likely
    {FCmpInst::FCMP_UNO, {FPOrdUntakenProb, FPOrdTakenProb}}, /// isnan -> Unlikely
};

/// Set of dedicated "absolute" execution weights for a block. These weights are
/// meaningful relative to each other and their derivatives only.
enum class BlockExecWeight : std::uint32_t {
  /// Special weight used for cases with exact zero probability.
  ZERO = 0x0,
  /// Minimal possible non-zero weight.
  LOWEST_NON_ZERO = 0x1,
  /// Weight to an 'unreachable' block.
  UNREACHABLE = ZERO,
  /// Weight to a block containing a non-returning call.
  NORETURN = LOWEST_NON_ZERO,
  /// Weight to the 'unwind' block of an invoke instruction.
  UNWIND = LOWEST_NON_ZERO,
  /// Weight to a 'cold' block. Cold blocks are the ones containing calls marked
  /// with attribute 'cold'.
  COLD = 0xffff,
  /// Default weight is used in cases when there is no dedicated execution
  /// weight set. It is not propagated through the domination line either.
  DEFAULT = 0xfffff
};
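// For reference on the scale above: DEFAULT (0xfffff) is roughly 16x COLD
// (0xffff), so under the estimated-weight heuristic a successor containing a
// cold call ends up roughly 16x less likely than an ordinary successor.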

BranchProbabilityInfo::SccInfo::SccInfo(const Function &F) {
  // Record SCC numbers of blocks in the CFG to identify irreducible loops.
  // FIXME: We could only calculate this if the CFG is known to be irreducible
  // (perhaps cache this info in LoopInfo if we can easily calculate it there?).
  int SccNum = 0;
  for (scc_iterator<const Function *> It = scc_begin(&F); !It.isAtEnd();
       ++It, ++SccNum) {
    // Ignore single-block SCCs since they either aren't loops or LoopInfo will
    // catch them.
    const std::vector<const BasicBlock *> &Scc = *It;
    if (Scc.size() == 1)
      continue;

    LLVM_DEBUG(dbgs() << "BPI: SCC " << SccNum << ":");
    for (const auto *BB : Scc) {
      LLVM_DEBUG(dbgs() << " " << BB->getName());
      SccNums[BB] = SccNum;
      calculateSccBlockType(BB, SccNum);
    }
    LLVM_DEBUG(dbgs() << "\n");
  }
}

int BranchProbabilityInfo::SccInfo::getSCCNum(const BasicBlock *BB) const {
  auto SccIt = SccNums.find(BB);
  if (SccIt == SccNums.end())
    return -1;
  return SccIt->second;
}

void BranchProbabilityInfo::SccInfo::getSccEnterBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Enters) const {

  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCHeader(BB, SccNum))
      for (const auto *Pred : predecessors(BB))
        if (getSCCNum(Pred) != SccNum)
          Enters.push_back(const_cast<BasicBlock *>(BB));
  }
}

void BranchProbabilityInfo::SccInfo::getSccExitBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Exits) const {
  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCExitingBlock(BB, SccNum))
      for (const auto *Succ : successors(BB))
        if (getSCCNum(Succ) != SccNum)
          Exits.push_back(const_cast<BasicBlock *>(Succ));
  }
}

uint32_t BranchProbabilityInfo::SccInfo::getSccBlockType(const BasicBlock *BB,
                                                         int SccNum) const {
  assert(getSCCNum(BB) == SccNum);

  assert(SccBlocks.size() > static_cast<unsigned>(SccNum) && "Unknown SCC");
  const auto &SccBlockTypes = SccBlocks[SccNum];

  auto It = SccBlockTypes.find(BB);
  if (It != SccBlockTypes.end()) {
    return It->second;
  }
  return Inner;
}

void BranchProbabilityInfo::SccInfo::calculateSccBlockType(const BasicBlock *BB,
                                                           int SccNum) {
  assert(getSCCNum(BB) == SccNum);
  uint32_t BlockType = Inner;

  if (llvm::any_of(predecessors(BB), [&](const BasicBlock *Pred) {
        // Consider any block that is an entry point to the SCC as
        // a header.
        return getSCCNum(Pred) != SccNum;
      }))
    BlockType |= Header;

  if (llvm::any_of(successors(BB), [&](const BasicBlock *Succ) {
        return getSCCNum(Succ) != SccNum;
      }))
    BlockType |= Exiting;

  // Lazily compute the set of headers for a given SCC and cache the results
  // in the SccHeaderMap.
  if (SccBlocks.size() <= static_cast<unsigned>(SccNum))
    SccBlocks.resize(SccNum + 1);
  auto &SccBlockTypes = SccBlocks[SccNum];

  if (BlockType != Inner) {
    bool IsInserted;
    std::tie(std::ignore, IsInserted) =
        SccBlockTypes.insert(std::make_pair(BB, BlockType));
    assert(IsInserted && "Duplicated block in SCC");
  }
}

BranchProbabilityInfo::LoopBlock::LoopBlock(const BasicBlock *BB,
                                            const LoopInfo &LI,
                                            const SccInfo &SccI)
    : BB(BB) {
  LD.first = LI.getLoopFor(BB);
  if (!LD.first) {
    LD.second = SccI.getSCCNum(BB);
  }
}
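// Note that a block inside an irreducible cycle has no llvm::Loop associated
// with it, so LD.first stays null and the SCC number (LD.second) stands in for
// the loop; the edge classification helpers below treat both cases uniformly.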

bool BranchProbabilityInfo::isLoopEnteringEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return (DstBlock.getLoop() &&
          !DstBlock.getLoop()->contains(SrcBlock.getLoop())) ||
         // Assume that SCCs can't be nested.
         (DstBlock.getSccNum() != -1 &&
          SrcBlock.getSccNum() != DstBlock.getSccNum());
}

bool BranchProbabilityInfo::isLoopExitingEdge(const LoopEdge &Edge) const {
  return isLoopEnteringEdge({Edge.second, Edge.first});
}

bool BranchProbabilityInfo::isLoopEnteringExitingEdge(
    const LoopEdge &Edge) const {
  return isLoopEnteringEdge(Edge) || isLoopExitingEdge(Edge);
}

bool BranchProbabilityInfo::isLoopBackEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return SrcBlock.belongsToSameLoop(DstBlock) &&
         ((DstBlock.getLoop() &&
           DstBlock.getLoop()->getHeader() == DstBlock.getBlock()) ||
          (DstBlock.getSccNum() != -1 &&
           SccI->isSCCHeader(DstBlock.getBlock(), DstBlock.getSccNum())));
}

void BranchProbabilityInfo::getLoopEnterBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Enters) const {
  if (LB.getLoop()) {
    auto *Header = LB.getLoop()->getHeader();
    Enters.append(pred_begin(Header), pred_end(Header));
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccEnterBlocks(LB.getSccNum(), Enters);
  }
}

void BranchProbabilityInfo::getLoopExitBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Exits) const {
  if (LB.getLoop()) {
    LB.getLoop()->getExitBlocks(Exits);
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccExitBlocks(LB.getSccNum(), Exits);
  }
}

// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing. Examine the metadata against the unreachable
// heuristic. The probability of an edge coming to an unreachable block is set
// to the minimum of the metadata probability and the unreachable heuristic.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const Instruction *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
  if (!(isa<BranchInst>(TI) || isa<SwitchInst>(TI) || isa<IndirectBrInst>(TI) ||
        isa<InvokeInst>(TI)))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;
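  // For reference, the metadata checked above has the form
  //   !prof !{!"branch_weights", i32 64, i32 4}
  // for a two-successor terminator; the loop below would turn those operands
  // into edge probabilities of 64/68 and 4/68.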

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  SmallVector<unsigned, 2> UnreachableIdxs;
  SmallVector<unsigned, 2> ReachableIdxs;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned I = 1, E = WeightsNode->getNumOperands(); I != E; ++I) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(I));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
    const LoopBlock SrcLoopBB = getLoopBlock(BB);
    const LoopBlock DstLoopBB = getLoopBlock(TI->getSuccessor(I - 1));
    auto EstimatedWeight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
    if (EstimatedWeight &&
        *EstimatedWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
      UnreachableIdxs.push_back(I - 1);
    else
      ReachableIdxs.push_back(I - 1);
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  if (ScalingFactor > 1) {
    WeightSum = 0;
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I) {
      Weights[I] /= ScalingFactor;
      WeightSum += Weights[I];
    }
  }
  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  if (WeightSum == 0 || ReachableIdxs.size() == 0) {
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
      Weights[I] = 1;
    WeightSum = TI->getNumSuccessors();
  }

  // Set the probability.
  SmallVector<BranchProbability, 2> BP;
  for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
    BP.push_back({ Weights[I], static_cast<uint32_t>(WeightSum) });

  // Examine the metadata against the unreachable heuristic.
  // If the unreachable heuristic is stronger then we use it for this edge.
  if (UnreachableIdxs.size() == 0 || ReachableIdxs.size() == 0) {
    setEdgeProbability(BB, BP);
    return true;
  }

  auto UnreachableProb = UR_TAKEN_PROB;
  for (auto I : UnreachableIdxs)
    if (UnreachableProb < BP[I]) {
      BP[I] = UnreachableProb;
    }

  // Sum of all edge probabilities must be 1.0. If we modified the probability
  // of some edges then we must distribute the introduced difference over the
  // reachable blocks.
  //
  // Proportional distribution: the relation between probabilities of the
  // reachable edges is kept unchanged. That is for any reachable edges i and j:
  //   newBP[i] / newBP[j] == oldBP[i] / oldBP[j] =>
  //   newBP[i] / oldBP[i] == newBP[j] / oldBP[j] == K
  // Where K is independent of i,j.
  //   newBP[i] == oldBP[i] * K
  // We need to find K.
  // Make a sum over all reachables of the left and right parts:
  //   sum_of_reachable(newBP) == K * sum_of_reachable(oldBP)
  // Sum of newBP must be equal to 1.0:
  //   sum_of_reachable(newBP) + sum_of_unreachable(newBP) == 1.0 =>
  //   sum_of_reachable(newBP) = 1.0 - sum_of_unreachable(newBP)
  // Where sum_of_unreachable(newBP) is what has just been changed.
  // Finally:
  //   K == sum_of_reachable(newBP) / sum_of_reachable(oldBP) =>
  //   K == (1.0 - sum_of_unreachable(newBP)) / sum_of_reachable(oldBP)
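  // Worked example (hypothetical numbers): suppose the metadata gives
  // BP = {0.5, 0.25, 0.25} and successor 0 leads to 'unreachable'. BP[0] is
  // clamped to ~0, so sum_of_unreachable(newBP) ~= 0, sum_of_reachable(newBP)
  // ~= 1.0, and K ~= 1.0 / (0.25 + 0.25) = 2, giving newBP ~= {~0, 0.5, 0.5}.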
  BranchProbability NewUnreachableSum = BranchProbability::getZero();
  for (auto I : UnreachableIdxs)
    NewUnreachableSum += BP[I];

  BranchProbability NewReachableSum =
      BranchProbability::getOne() - NewUnreachableSum;

  BranchProbability OldReachableSum = BranchProbability::getZero();
  for (auto I : ReachableIdxs)
    OldReachableSum += BP[I];

  if (OldReachableSum != NewReachableSum) { // Anything to distribute?
    if (OldReachableSum.isZero()) {
      // If all oldBP[i] are zeroes then the proportional distribution results
      // in all zero probabilities and the error stays big. In this case we
      // evenly spread NewReachableSum over the reachable edges.
      BranchProbability PerEdge = NewReachableSum / ReachableIdxs.size();
      for (auto I : ReachableIdxs)
        BP[I] = PerEdge;
    } else {
      for (auto I : ReachableIdxs) {
        // We use uint64_t to avoid double rounding error of the following
        // calculation: BP[i] = BP[i] * NewReachableSum / OldReachableSum
        // The formula is taken from the private constructor
        // BranchProbability(uint32_t Numerator, uint32_t Denominator)
        uint64_t Mul = static_cast<uint64_t>(NewReachableSum.getNumerator()) *
                       BP[I].getNumerator();
        uint32_t Div = static_cast<uint32_t>(
            divideNearest(Mul, OldReachableSum.getNumerator()));
        BP[I] = BranchProbability::getRaw(Div);
      }
    }
  }

  setEdgeProbability(BB, BP);

  return true;
}

// Calculate Edge Weights using "Pointer Heuristics". Predict that a comparison
// between two pointers, or between a pointer and NULL, will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI || !CI->isEquality())
    return false;

  Value *LHS = CI->getOperand(0);

  if (!LHS->getType()->isPointerTy())
    return false;

  assert(CI->getOperand(1)->getType()->isPointerTy());

  auto Search = PointerTable.find(CI->getPredicate());
  if (Search == PointerTable.end())
    return false;
  setEdgeProbability(BB, Search->second);
  return true;
}

// Compute the unlikely successors to the block BB in the loop L, specifically
// those that are unlikely because this is a loop, and add them to the
// UnlikelyBlocks set.
static void
computeUnlikelySuccessors(const BasicBlock *BB, Loop *L,
                          SmallPtrSetImpl<const BasicBlock*> &UnlikelyBlocks) {
  // Sometimes in a loop we have a branch whose condition is made false by
  // taking it. This is typically something like
  //   int n = 0;
  //   while (...) {
  //     if (++n >= MAX) {
  //       n = 0;
  //     }
  //   }
  // In this sort of situation taking the branch means that at the very least
  // it won't be taken again in the next iteration of the loop, so we should
  // consider it less likely than a typical branch.
  //
  // We detect this by looking back through the graph of PHI nodes that set the
  // value that the condition depends on, and seeing if we can reach a
  // successor block which can be determined to make the condition false.
  //
  // FIXME: We currently consider unlikely blocks to be half as likely as other
  // blocks, but if we consider the example above the likelihood is actually
  // 1/MAX. We could therefore be more precise in how unlikely we consider
  // blocks to be, but it would require more careful examination of the form
  // of the comparison expression.
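  // In the example above, the value of `n` coming from the `n = 0` successor
  // is the constant 0; folding the collected `++n` chain over it yields 1, so
  // (for a constant MAX > 1) the compare folds to false and that successor is
  // marked unlikely by the traversal below.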
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return;

  // Check if the branch is based on an instruction compared with a constant
  CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
  if (!CI || !isa<Instruction>(CI->getOperand(0)) ||
      !isa<Constant>(CI->getOperand(1)))
    return;

  // Either the instruction must be a PHI, or a chain of operations involving
  // constants that ends in a PHI which we can then collapse into a single value
  // if the PHI value is known.
  Instruction *CmpLHS = dyn_cast<Instruction>(CI->getOperand(0));
  PHINode *CmpPHI = dyn_cast<PHINode>(CmpLHS);
  Constant *CmpConst = dyn_cast<Constant>(CI->getOperand(1));
  // Collect the instructions until we hit a PHI
  SmallVector<BinaryOperator *, 1> InstChain;
  while (!CmpPHI && CmpLHS && isa<BinaryOperator>(CmpLHS) &&
         isa<Constant>(CmpLHS->getOperand(1))) {
    // Stop if the chain extends outside of the loop
    if (!L->contains(CmpLHS))
      return;
    InstChain.push_back(cast<BinaryOperator>(CmpLHS));
    CmpLHS = dyn_cast<Instruction>(CmpLHS->getOperand(0));
    if (CmpLHS)
      CmpPHI = dyn_cast<PHINode>(CmpLHS);
  }
  if (!CmpPHI || !L->contains(CmpPHI))
    return;

  // Trace the phi node to find all values that come from successors of BB
  SmallPtrSet<PHINode*, 8> VisitedInsts;
  SmallVector<PHINode*, 8> WorkList;
  WorkList.push_back(CmpPHI);
  VisitedInsts.insert(CmpPHI);
  while (!WorkList.empty()) {
    PHINode *P = WorkList.pop_back_val();
    for (BasicBlock *B : P->blocks()) {
      // Skip blocks that aren't part of the loop
      if (!L->contains(B))
        continue;
      Value *V = P->getIncomingValueForBlock(B);
      // If the source is a PHI add it to the work list if we haven't
      // already visited it.
      if (PHINode *PN = dyn_cast<PHINode>(V)) {
        if (VisitedInsts.insert(PN).second)
          WorkList.push_back(PN);
        continue;
      }
      // If this incoming value is a constant and B is a successor of BB, then
      // we can constant-evaluate the compare to see if it makes the branch be
      // taken or not.
      Constant *CmpLHSConst = dyn_cast<Constant>(V);
      if (!CmpLHSConst || !llvm::is_contained(successors(BB), B))
        continue;
      // First collapse InstChain
      for (Instruction *I : llvm::reverse(InstChain)) {
        CmpLHSConst = ConstantExpr::get(I->getOpcode(), CmpLHSConst,
                                        cast<Constant>(I->getOperand(1)), true);
        if (!CmpLHSConst)
          break;
      }
      if (!CmpLHSConst)
        continue;
      // Now constant-evaluate the compare
      Constant *Result = ConstantExpr::getCompare(CI->getPredicate(),
                                                  CmpLHSConst, CmpConst, true);
      // If the result means we don't branch to the block then that block is
      // unlikely.
      if (Result &&
          ((Result->isZeroValue() && B == BI->getSuccessor(0)) ||
           (Result->isOneValue() && B == BI->getSuccessor(1))))
        UnlikelyBlocks.insert(B);
    }
  }
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedBlockWeight(const BasicBlock *BB) const {
  auto WeightIt = EstimatedBlockWeight.find(BB);
  if (WeightIt == EstimatedBlockWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedLoopWeight(const LoopData &L) const {
  auto WeightIt = EstimatedLoopWeight.find(L);
  if (WeightIt == EstimatedLoopWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedEdgeWeight(const LoopEdge &Edge) const {
  // For edges entering a loop take the weight of the loop rather than that of
  // an individual block in the loop.
  return isLoopEnteringEdge(Edge)
             ? getEstimatedLoopWeight(Edge.second.getLoopData())
             : getEstimatedBlockWeight(Edge.second.getBlock());
}

template <class IterT>
Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
    const LoopBlock &SrcLoopBB, iterator_range<IterT> Successors) const {
  SmallVector<uint32_t, 4> Weights;
  Optional<uint32_t> MaxWeight;
  for (const BasicBlock *DstBB : Successors) {
    const LoopBlock DstLoopBB = getLoopBlock(DstBB);
    auto Weight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});

    if (!Weight)
      return None;

    if (!MaxWeight || *MaxWeight < *Weight)
      MaxWeight = Weight;
  }

  return MaxWeight;
}

// Updates \p LoopBB's weight and returns true. If \p LoopBB already has an
// associated weight it is left unchanged and false is returned.
//
// Please note that by the algorithm the weight is not expected to change once
// set, thus the 'false' status is also used to track visited blocks.
bool BranchProbabilityInfo::updateEstimatedBlockWeight(
    LoopBlock &LoopBB, uint32_t BBWeight,
    SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  BasicBlock *BB = LoopBB.getBlock();

  // In general, a weight is assigned to a block when it has a final value and
  // can't/shouldn't be changed. However, there are cases when a block
  // inherently has several (possibly "contradicting") weights. For example, an
  // "unwind" block may also contain a "cold" call. In that case the first
  // weight set is favored and all subsequent weights are ignored.
  if (!EstimatedBlockWeight.insert({BB, BBWeight}).second)
    return false;

  for (BasicBlock *PredBlock : predecessors(BB)) {
    LoopBlock PredLoop = getLoopBlock(PredBlock);
    // Add affected block/loop to a working list.
    if (isLoopExitingEdge({PredLoop, LoopBB})) {
      if (!EstimatedLoopWeight.count(PredLoop.getLoopData()))
        LoopWorkList.push_back(PredLoop);
    } else if (!EstimatedBlockWeight.count(PredBlock))
      BlockWorkList.push_back(PredBlock);
  }
  return true;
}
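// For example, when the block above (BB) is an exit block of some loop and has
// just been assigned a weight, the predecessor's loop (rather than the
// predecessor block itself) is queued on LoopWorkList, so that
// computeEestimateBlockWeight can later derive the loop's weight from the
// maximum over its exits.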

// Starting from \p BB, traverse through dominator blocks and assign \p BBWeight
// to all blocks that are post dominated by \p BB; in other words, to all blocks
// that are executed if and only if \p BB is executed. Importantly, we skip
// loops here for two reasons. First, weights of blocks in a loop should be
// scaled by the trip count (which may be unknown). Second, there is no value in
// doing so because it gives no additional information regarding the
// distribution of probabilities inside the loop. The exception is loop 'enter'
// and 'exit' edges, which are handled in a special way in
// calcEstimatedHeuristics.
//
// In addition, \p BlockWorkList is populated with basic blocks if at least one
// successor has an updated estimated weight.
void BranchProbabilityInfo::propagateEstimatedBlockWeight(
    const LoopBlock &LoopBB, DominatorTree *DT, PostDominatorTree *PDT,
    uint32_t BBWeight, SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  const BasicBlock *BB = LoopBB.getBlock();
  const auto *DTStartNode = DT->getNode(BB);
  const auto *PDTStartNode = PDT->getNode(BB);

  // TODO: Consider propagating weight down the domination line as well.
  for (const auto *DTNode = DTStartNode; DTNode != nullptr;
       DTNode = DTNode->getIDom()) {
    auto *DomBB = DTNode->getBlock();
    // Consider blocks which lie on one 'line'.
    if (!PDT->dominates(PDTStartNode, PDT->getNode(DomBB)))
      // If BB doesn't post dominate DomBB it will not post dominate dominators
      // of DomBB as well.
      break;

    LoopBlock DomLoopBB = getLoopBlock(DomBB);
    const LoopEdge Edge{DomLoopBB, LoopBB};
    // Don't propagate weight to blocks belonging to different loops.
    if (!isLoopEnteringExitingEdge(Edge)) {
      if (!updateEstimatedBlockWeight(DomLoopBB, BBWeight, BlockWorkList,
                                      LoopWorkList))
        // If DomBB has a weight set then all its predecessors are already
        // processed (since we propagate weight up to the top of IR each time).
        break;
    } else if (isLoopExitingEdge(Edge)) {
      LoopWorkList.push_back(DomLoopBB);
    }
  }
}

Optional<uint32_t> BranchProbabilityInfo::getInitialEstimatedBlockWeight(
    const BasicBlock *BB) {
  // Returns true if \p BB has a call marked with the "NoReturn" attribute.
  auto hasNoReturn = [&](const BasicBlock *BB) {
    for (const auto &I : reverse(*BB))
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (CI->hasFnAttr(Attribute::NoReturn))
          return true;

    return false;
  };

  // Important note regarding the order of checks: they are ordered by weight
  // from lowest to highest. Doing so helps to avoid "unstable" results when
  // several heuristics could apply simultaneously.
  if (isa<UnreachableInst>(BB->getTerminator()) ||
      // If this block is terminated by a call to
      // @llvm.experimental.deoptimize then treat it like an unreachable
      // since it is expected to practically never execute.
      // TODO: Should we actually treat it as a never-returning call?
      BB->getTerminatingDeoptimizeCall())
    return hasNoReturn(BB)
               ? static_cast<uint32_t>(BlockExecWeight::NORETURN)
               : static_cast<uint32_t>(BlockExecWeight::UNREACHABLE);

  // Check if the block is the 'unwind' handler of some invoke instruction.
  for (const auto *Pred : predecessors(BB))
    if (Pred)
      if (const auto *II = dyn_cast<InvokeInst>(Pred->getTerminator()))
        if (II->getUnwindDest() == BB)
          return static_cast<uint32_t>(BlockExecWeight::UNWIND);
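  // Note the effect of the ordering: for example, a block that ends in
  // 'unreachable' but also contains a call marked 'cold' is classified by the
  // first check above and gets the UNREACHABLE/NORETURN weight, not COLD
  // (checked below).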

  // Check if the block contains a 'cold' call.
  for (const auto &I : *BB)
    if (const CallInst *CI = dyn_cast<CallInst>(&I))
      if (CI->hasFnAttr(Attribute::Cold))
        return static_cast<uint32_t>(BlockExecWeight::COLD);

  return None;
}

// Does an RPO traversal over all blocks in \p F and assigns weights to
// 'unreachable', 'noreturn', 'cold' and 'unwind' blocks. In addition it does
// its best to propagate the weight up and down the IR.
void BranchProbabilityInfo::computeEestimateBlockWeight(
    const Function &F, DominatorTree *DT, PostDominatorTree *PDT) {
  SmallVector<BasicBlock *, 8> BlockWorkList;
  SmallVector<LoopBlock, 8> LoopWorkList;

  // By doing RPO we make sure that all predecessors already have weights
  // calculated before visiting their successors.
  ReversePostOrderTraversal<const Function *> RPOT(&F);
  for (const auto *BB : RPOT)
    if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
      // If we were able to find an estimated weight for the block, set it to
      // this block and propagate it up the IR.
      propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT,
                                    BBWeight.getValue(), BlockWorkList,
                                    LoopWorkList);

  // BlockWorkList/LoopWorkList contain blocks/loops with at least one
  // successor/exit having an estimated weight. Try to propagate weight to such
  // blocks/loops from successors/exits.
  // Process loops and blocks. Order is not important.
  do {
    while (!LoopWorkList.empty()) {
      const LoopBlock LoopBB = LoopWorkList.pop_back_val();

      if (EstimatedLoopWeight.count(LoopBB.getLoopData()))
        continue;

      SmallVector<BasicBlock *, 4> Exits;
      getLoopExitBlocks(LoopBB, Exits);
      auto LoopWeight = getMaxEstimatedEdgeWeight(
          LoopBB, make_range(Exits.begin(), Exits.end()));

      if (LoopWeight) {
        // If we never exit the loop then we can enter it once at maximum.
        if (LoopWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
          LoopWeight = static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);

        EstimatedLoopWeight.insert({LoopBB.getLoopData(), *LoopWeight});
        // Add all blocks entering the loop into the working list.
        getLoopEnterBlocks(LoopBB, BlockWorkList);
      }
    }

    while (!BlockWorkList.empty()) {
      // We can reach here only if BlockWorkList is not empty.
      const BasicBlock *BB = BlockWorkList.pop_back_val();
      if (EstimatedBlockWeight.count(BB))
        continue;

      // We take the maximum over all weights of successors. In other words we
      // take the weight of the "hot" path. In theory we can probably find a
      // better function which gives higher accuracy results (compared to
      // "maximum") but I can't think of any right now. And I doubt it will
      // make any difference in practice.
      const LoopBlock LoopBB = getLoopBlock(BB);
      auto MaxWeight = getMaxEstimatedEdgeWeight(LoopBB, successors(BB));

      if (MaxWeight)
        propagateEstimatedBlockWeight(LoopBB, DT, PDT, *MaxWeight,
                                      BlockWorkList, LoopWorkList);
    }
  } while (!BlockWorkList.empty() || !LoopWorkList.empty());
}

// Calculate edge probabilities based on a block's estimated weight.
// Note that the gathered weights were not scaled for loops. Thus edges entering
// and exiting loops require special processing.
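// At this point weights have been recorded only for the 'special' blocks above
// (unreachable, noreturn, unwind, cold) and for blocks/loops the weights could
// be propagated to; every other successor falls back to
// BlockExecWeight::DEFAULT in the calculation below.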
bool BranchProbabilityInfo::calcEstimatedHeuristics(const BasicBlock *BB) {
  assert(BB->getTerminator()->getNumSuccessors() > 1 &&
         "expected more than one successor!");

  const LoopBlock LoopBB = getLoopBlock(BB);

  SmallPtrSet<const BasicBlock *, 8> UnlikelyBlocks;
  uint32_t TC = LBH_TAKEN_WEIGHT / LBH_NONTAKEN_WEIGHT;
  if (LoopBB.getLoop())
    computeUnlikelySuccessors(BB, LoopBB.getLoop(), UnlikelyBlocks);

  // Changed to 'true' if at least one successor has an estimated weight.
  bool FoundEstimatedWeight = false;
  SmallVector<uint32_t, 4> SuccWeights;
  uint64_t TotalWeight = 0;
  // Go over all successors of BB and put their weights into SuccWeights.
  for (const BasicBlock *SuccBB : successors(BB)) {
    Optional<uint32_t> Weight;
    const LoopBlock SuccLoopBB = getLoopBlock(SuccBB);
    const LoopEdge Edge{LoopBB, SuccLoopBB};

    Weight = getEstimatedEdgeWeight(Edge);

    if (isLoopExitingEdge(Edge) &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // Scale down loop exiting weight by trip count.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) /
              TC);
    }
    bool IsUnlikelyEdge = LoopBB.getLoop() && UnlikelyBlocks.contains(SuccBB);
    if (IsUnlikelyEdge &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // 'Unlikely' blocks get half the weight.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) / 2);
    }

    if (Weight)
      FoundEstimatedWeight = true;

    auto WeightVal =
        Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT));
    TotalWeight += WeightVal;
    SuccWeights.push_back(WeightVal);
  }

  // If none of the blocks has an estimated weight, bail out.
  // If TotalWeight is 0 that means the weight of each successor is 0 as well
  // and they are equally likely. Bail out early to not deal with division by
  // zero.
  if (!FoundEstimatedWeight || TotalWeight == 0)
    return false;

  assert(SuccWeights.size() == succ_size(BB) && "Missed successor?");
  const unsigned SuccCount = SuccWeights.size();

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  if (TotalWeight > UINT32_MAX) {
    uint64_t ScalingFactor = TotalWeight / UINT32_MAX + 1;
    TotalWeight = 0;
    for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
      SuccWeights[Idx] /= ScalingFactor;
      if (SuccWeights[Idx] == static_cast<uint32_t>(BlockExecWeight::ZERO))
        SuccWeights[Idx] =
            static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);
      TotalWeight += SuccWeights[Idx];
    }
    assert(TotalWeight <= UINT32_MAX && "Total weight overflows");
  }

  // Finally set probabilities to edges according to estimated block weights.
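  // For example, two successors with weights {DEFAULT, COLD} =
  // {0xfffff, 0xffff} yield edge probabilities of roughly 94% and 6% here.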
  SmallVector<BranchProbability, 4> EdgeProbabilities(
      SuccCount, BranchProbability::getUnknown());

  for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
    EdgeProbabilities[Idx] =
        BranchProbability(SuccWeights[Idx], (uint32_t)TotalWeight);
  }
  setEdgeProbability(BB, EdgeProbabilities);
  return true;
}

bool BranchProbabilityInfo::calcZeroHeuristics(const BasicBlock *BB,
                                               const TargetLibraryInfo *TLI) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI)
    return false;

  auto GetConstantInt = [](Value *V) {
    if (auto *I = dyn_cast<BitCastInst>(V))
      return dyn_cast<ConstantInt>(I->getOperand(0));
    return dyn_cast<ConstantInt>(V);
  };

  Value *RHS = CI->getOperand(1);
  ConstantInt *CV = GetConstantInt(RHS);
  if (!CV)
    return false;

  // If the LHS is the result of AND'ing a value with a single bit bitmask,
  // we don't have information about probabilities.
  if (Instruction *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
    if (LHS->getOpcode() == Instruction::And)
      if (ConstantInt *AndRHS = GetConstantInt(LHS->getOperand(1)))
        if (AndRHS->getValue().isPowerOf2())
          return false;

  // Check if the LHS is the return value of a library function
  LibFunc Func = NumLibFuncs;
  if (TLI)
    if (CallInst *Call = dyn_cast<CallInst>(CI->getOperand(0)))
      if (Function *CalledFn = Call->getCalledFunction())
        TLI->getLibFunc(*CalledFn, Func);

  ProbabilityTable::const_iterator Search;
  if (Func == LibFunc_strcasecmp ||
      Func == LibFunc_strcmp ||
      Func == LibFunc_strncasecmp ||
      Func == LibFunc_strncmp ||
      Func == LibFunc_memcmp ||
      Func == LibFunc_bcmp) {
    Search = ICmpWithLibCallTable.find(CI->getPredicate());
    if (Search == ICmpWithLibCallTable.end())
      return false;
  } else if (CV->isZero()) {
    Search = ICmpWithZeroTable.find(CI->getPredicate());
    if (Search == ICmpWithZeroTable.end())
      return false;
  } else if (CV->isOne()) {
    Search = ICmpWithOneTable.find(CI->getPredicate());
    if (Search == ICmpWithOneTable.end())
      return false;
  } else if (CV->isMinusOne()) {
    Search = ICmpWithMinusOneTable.find(CI->getPredicate());
    if (Search == ICmpWithMinusOneTable.end())
      return false;
  } else {
    return false;
  }

  setEdgeProbability(BB, Search->second);
  return true;
}

bool BranchProbabilityInfo::calcFloatingPointHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
  if (!FCmp)
    return false;
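  // Equality-style FP compares below use the mild 20:12 FPTakenProb /
  // FPUntakenProb weights; only the explicit ORD/UNO NaN checks use the
  // near-certain FPOrdTakenProb / FPOrdUntakenProb entries from FCmpTable.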

  ProbabilityList ProbList;
  if (FCmp->isEquality()) {
    ProbList = !FCmp->isTrueWhenEqual() ?
      // f1 == f2 -> Unlikely
      ProbabilityList({FPTakenProb, FPUntakenProb}) :
      // f1 != f2 -> Likely
      ProbabilityList({FPUntakenProb, FPTakenProb});
  } else {
    auto Search = FCmpTable.find(FCmp->getPredicate());
    if (Search == FCmpTable.end())
      return false;
    ProbList = Search->second;
  }

  setEdgeProbability(BB, ProbList);
  return true;
}

void BranchProbabilityInfo::releaseMemory() {
  Probs.clear();
  Handles.clear();
}

bool BranchProbabilityInfo::invalidate(Function &, const PreservedAnalyses &PA,
                                       FunctionAnalysisManager::Invalidator &) {
  // Check whether the analysis, all analyses on functions, or the function's
  // CFG have been preserved.
  auto PAC = PA.getChecker<BranchProbabilityAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
           PAC.preservedSet<CFGAnalyses>());
}

void BranchProbabilityInfo::print(raw_ostream &OS) const {
  OS << "---- Branch Probabilities ----\n";
  // We print the probabilities from the last function the analysis ran over,
  // or the function it is currently running over.
  assert(LastF && "Cannot print prior to running over a function");
  for (const auto &BI : *LastF) {
    for (const BasicBlock *Succ : successors(&BI))
      printEdgeProbability(OS << " ", &BI, Succ);
  }
}

bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
  // Hot probability is at least 4/5 = 80%
  // FIXME: Compare against a static "hot" BranchProbability.
  return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}

/// Get the raw edge probability for the edge. If we can't find it, return a
/// default probability 1/N where N is the number of successors. Here an edge
/// is specified using PredBlock and an index into its successors.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          unsigned IndexInSuccessors) const {
  auto I = Probs.find(std::make_pair(Src, IndexInSuccessors));
  assert((Probs.end() == Probs.find(std::make_pair(Src, 0))) ==
             (Probs.end() == I) &&
         "Probability for I-th successor must always be defined along with the "
         "probability for the first successor");

  if (I != Probs.end())
    return I->second;

  return {1, static_cast<uint32_t>(succ_size(Src))};
}

BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const_succ_iterator Dst) const {
  return getEdgeProbability(Src, Dst.getSuccessorIndex());
}

/// Get the raw edge probability calculated for the block pair. This returns
/// the sum of all raw edge probabilities from Src to Dst.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const BasicBlock *Dst) const {
  if (!Probs.count(std::make_pair(Src, 0)))
    return BranchProbability(llvm::count(successors(Src), Dst), succ_size(Src));

  auto Prob = BranchProbability::getZero();
  for (const_succ_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
    if (*I == Dst)
      Prob += Probs.find(std::make_pair(Src, I.getSuccessorIndex()))->second;

  return Prob;
}
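// For example, if no probabilities are stored for Src and a switch lists Dst
// twice among four successors, the fallback above returns 2/4 = 50%.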

/// Set the edge probability for all edges at once.
void BranchProbabilityInfo::setEdgeProbability(
    const BasicBlock *Src, const SmallVectorImpl<BranchProbability> &Probs) {
  assert(Src->getTerminator()->getNumSuccessors() == Probs.size());
  eraseBlock(Src); // Erase stale data if any.
  if (Probs.size() == 0)
    return; // Nothing to set.

  Handles.insert(BasicBlockCallbackVH(Src, this));
  uint64_t TotalNumerator = 0;
  for (unsigned SuccIdx = 0; SuccIdx < Probs.size(); ++SuccIdx) {
    this->Probs[std::make_pair(Src, SuccIdx)] = Probs[SuccIdx];
    LLVM_DEBUG(dbgs() << "set edge " << Src->getName() << " -> " << SuccIdx
                      << " successor probability to " << Probs[SuccIdx]
                      << "\n");
    TotalNumerator += Probs[SuccIdx].getNumerator();
  }

  // Because of rounding errors the total probability cannot be checked to be
  // 1.0 exactly, i.e. we cannot require that
  // TotalNumerator == BranchProbability::getDenominator(). Instead, every
  // single probability in Probs must be as accurate as possible. This results
  // in an error of 1/denominator at most, thus the total absolute error should
  // be within Probs.size() / BranchProbability::getDenominator().
  assert(TotalNumerator <= BranchProbability::getDenominator() + Probs.size());
  assert(TotalNumerator >= BranchProbability::getDenominator() - Probs.size());
  (void)TotalNumerator;
}

void BranchProbabilityInfo::copyEdgeProbabilities(BasicBlock *Src,
                                                  BasicBlock *Dst) {
  eraseBlock(Dst); // Erase stale data if any.
  unsigned NumSuccessors = Src->getTerminator()->getNumSuccessors();
  assert(NumSuccessors == Dst->getTerminator()->getNumSuccessors());
  if (NumSuccessors == 0)
    return; // Nothing to set.
  if (this->Probs.find(std::make_pair(Src, 0)) == this->Probs.end())
    return; // No probability is set for edges from Src. Keep the same for Dst.

  Handles.insert(BasicBlockCallbackVH(Dst, this));
  for (unsigned SuccIdx = 0; SuccIdx < NumSuccessors; ++SuccIdx) {
    auto Prob = this->Probs[std::make_pair(Src, SuccIdx)];
    this->Probs[std::make_pair(Dst, SuccIdx)] = Prob;
    LLVM_DEBUG(dbgs() << "set edge " << Dst->getName() << " -> " << SuccIdx
                      << " successor probability to " << Prob << "\n");
  }
}

raw_ostream &
BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
                                            const BasicBlock *Src,
                                            const BasicBlock *Dst) const {
  const BranchProbability Prob = getEdgeProbability(Src, Dst);
  OS << "edge " << Src->getName() << " -> " << Dst->getName()
     << " probability is " << Prob
     << (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");

  return OS;
}

void BranchProbabilityInfo::eraseBlock(const BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "eraseBlock " << BB->getName() << "\n");

  // Note that we cannot use successors of BB because the terminator of BB may
  // have changed when eraseBlock is called as a BasicBlockCallbackVH callback.
  // Instead we remove prob data for the block by iterating successors by their
  // indices from 0 till the last which exists. There could not be prob data for
  // a pair (BB, N) if there is no data for (BB, N-1) because the data is always
  // set for all successors from 0 to M at once by the method
  // setEdgeProbability().
  Handles.erase(BasicBlockCallbackVH(BB, this));
  for (unsigned I = 0;; ++I) {
    auto MapI = Probs.find(std::make_pair(BB, I));
    if (MapI == Probs.end()) {
      assert(Probs.count(std::make_pair(BB, I + 1)) == 0 &&
             "Must be no more successors");
      return;
    }
    Probs.erase(MapI);
  }
}

void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LoopI,
                                      const TargetLibraryInfo *TLI,
                                      DominatorTree *DT,
                                      PostDominatorTree *PDT) {
  LLVM_DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
                    << " ----\n\n");
  LastF = &F; // Store the last function we ran on for printing.
  LI = &LoopI;

  SccI = std::make_unique<SccInfo>(F);

  assert(EstimatedBlockWeight.empty());
  assert(EstimatedLoopWeight.empty());

  std::unique_ptr<DominatorTree> DTPtr;
  std::unique_ptr<PostDominatorTree> PDTPtr;

  if (!DT) {
    DTPtr = std::make_unique<DominatorTree>(const_cast<Function &>(F));
    DT = DTPtr.get();
  }

  if (!PDT) {
    PDTPtr = std::make_unique<PostDominatorTree>(const_cast<Function &>(F));
    PDT = PDTPtr.get();
  }

  computeEestimateBlockWeight(F, DT, PDT);
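  // The heuristics below are tried in decreasing order of priority: explicit
  // !prof metadata first, then the estimated-weight heuristics, then the
  // simple pointer/zero/floating-point ones; the first that succeeds for a
  // block sets its edge probabilities.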

  // Walk the basic blocks in post-order so that we can build up state about
  // the successors of a block iteratively.
  for (auto BB : post_order(&F.getEntryBlock())) {
    LLVM_DEBUG(dbgs() << "Computing probabilities for " << BB->getName()
                      << "\n");
    // If there are not at least two successors, there is no sense in setting
    // a probability.
    if (BB->getTerminator()->getNumSuccessors() < 2)
      continue;
    if (calcMetadataWeights(BB))
      continue;
    if (calcEstimatedHeuristics(BB))
      continue;
    if (calcPointerHeuristics(BB))
      continue;
    if (calcZeroHeuristics(BB, TLI))
      continue;
    if (calcFloatingPointHeuristics(BB))
      continue;
  }

  EstimatedLoopWeight.clear();
  EstimatedBlockWeight.clear();
  SccI.reset();

  if (PrintBranchProb &&
      (PrintBranchProbFuncName.empty() ||
       F.getName().equals(PrintBranchProbFuncName))) {
    print(dbgs());
  }
}

void BranchProbabilityInfoWrapperPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  // We require DT so it's available when LI is available. The LI updating code
  // asserts that DT is also present so if we don't make sure that we have DT
  // here, that assert will trigger.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PostDominatorTreeWrapperPass>();
  AU.setPreservesAll();
}

bool BranchProbabilityInfoWrapperPass::runOnFunction(Function &F) {
  const LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  PostDominatorTree &PDT =
      getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  BPI.calculate(F, LI, &TLI, &DT, &PDT);
  return false;
}

void BranchProbabilityInfoWrapperPass::releaseMemory() { BPI.releaseMemory(); }

void BranchProbabilityInfoWrapperPass::print(raw_ostream &OS,
                                             const Module *) const {
  BPI.print(OS);
}

AnalysisKey BranchProbabilityAnalysis::Key;
BranchProbabilityInfo
BranchProbabilityAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  BranchProbabilityInfo BPI;
  BPI.calculate(F, AM.getResult<LoopAnalysis>(F),
                &AM.getResult<TargetLibraryAnalysis>(F),
                &AM.getResult<DominatorTreeAnalysis>(F),
                &AM.getResult<PostDominatorTreeAnalysis>(F));
  return BPI;
}

PreservedAnalyses
BranchProbabilityPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  OS << "Printing analysis results of BPI for function "
     << "'" << F.getName() << "':"
     << "\n";
  AM.getResult<BranchProbabilityAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}