//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScaledNumber.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <numeric>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::bfi_detail;

#define DEBUG_TYPE "block-freq"

ScaledNumber<uint64_t> BlockMass::toScaled() const {
  if (isFull())
    return ScaledNumber<uint64_t>(1, 0);
  return ScaledNumber<uint64_t>(getMass() + 1, -64);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void BlockMass::dump() const { print(dbgs()); }
#endif

static char getHexDigit(int N) {
  assert(N < 16);
  if (N < 10)
    return '0' + N;
  return 'a' + N - 10;
}

raw_ostream &BlockMass::print(raw_ostream &OS) const {
  for (int Digits = 0; Digits < 16; ++Digits)
    OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
  return OS;
}

namespace {

using BlockNode = BlockFrequencyInfoImplBase::BlockNode;
using Distribution = BlockFrequencyInfoImplBase::Distribution;
using WeightList = BlockFrequencyInfoImplBase::Distribution::WeightList;
using Scaled64 = BlockFrequencyInfoImplBase::Scaled64;
using LoopData = BlockFrequencyInfoImplBase::LoopData;
using Weight = BlockFrequencyInfoImplBase::Weight;
using FrequencyData = BlockFrequencyInfoImplBase::FrequencyData;

/// Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
///  1. Initialize by saving the sum of the weights in \a RemWeight and the
///     mass to distribute in \a RemMass.
///
///  2. For each portion:
///
///      1. Construct a branch probability, P, as the portion's weight divided
///         by the current value of \a RemWeight.
///      2. Calculate the portion's mass as \a RemMass times P.
///      3. Update \a RemWeight and \a RemMass at each portion by subtracting
///         the current portion's weight and mass.
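///
/// For example (an illustrative walk-through, not code from this file):
/// distributing a full mass across weights 3 and 1 first takes 3/4 of the
/// remaining mass, then updates \a RemWeight to 1 and \a RemMass to the
/// leftover 1/4; the final portion's probability is 1/1, so it takes exactly
/// what remains and no mass is lost.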
struct DitheringDistributer {
  uint32_t RemWeight;
  BlockMass RemMass;

  DitheringDistributer(Distribution &Dist, const BlockMass &Mass);

  BlockMass takeMass(uint32_t Weight);
};

} // end anonymous namespace

DitheringDistributer::DitheringDistributer(Distribution &Dist,
                                           const BlockMass &Mass) {
  Dist.normalize();
  RemWeight = Dist.Total;
  RemMass = Mass;
}

BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
  assert(Weight && "invalid weight");
  assert(Weight <= RemWeight);
  BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);

  // Decrement totals (dither).
  RemWeight -= Weight;
  RemMass -= Mass;
  return Mass;
}

void Distribution::add(const BlockNode &Node, uint64_t Amount,
                       Weight::DistType Type) {
  assert(Amount && "invalid weight of 0");
  uint64_t NewTotal = Total + Amount;

  // Check for overflow. It should be impossible to overflow twice.
  bool IsOverflow = NewTotal < Total;
  assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
  DidOverflow |= IsOverflow;

  // Update the total.
  Total = NewTotal;

  // Save the weight.
  Weights.push_back(Weight(Type, Node, Amount));
}

static void combineWeight(Weight &W, const Weight &OtherW) {
  assert(OtherW.TargetNode.isValid());
  if (!W.Amount) {
    W = OtherW;
    return;
  }
  assert(W.Type == OtherW.Type);
  assert(W.TargetNode == OtherW.TargetNode);
  assert(OtherW.Amount && "Expected non-zero weight");
  if (W.Amount > W.Amount + OtherW.Amount)
    // Saturate on overflow.
    W.Amount = UINT64_MAX;
  else
    W.Amount += OtherW.Amount;
}

static void combineWeightsBySorting(WeightList &Weights) {
  // Sort so edges to the same node are adjacent.
  llvm::sort(Weights.begin(), Weights.end(),
             [](const Weight &L,
                const Weight &R) { return L.TargetNode < R.TargetNode; });

  // Combine adjacent edges.
  WeightList::iterator O = Weights.begin();
  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
       ++O, (I = L)) {
    *O = *I;

    // Find the adjacent weights to the same node.
    for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
      combineWeight(*O, *L);
  }

  // Erase extra entries.
  Weights.erase(O, Weights.end());
}

static void combineWeightsByHashing(WeightList &Weights) {
  // Collect weights into a DenseMap.
  using HashTable = DenseMap<BlockNode::IndexType, Weight>;

  HashTable Combined(NextPowerOf2(2 * Weights.size()));
  for (const Weight &W : Weights)
    combineWeight(Combined[W.TargetNode.Index], W);

  // Check whether anything changed.
  if (Weights.size() == Combined.size())
    return;

  // Fill in the new weights.
  Weights.clear();
  Weights.reserve(Combined.size());
  for (const auto &I : Combined)
    Weights.push_back(I.second);
}

static void combineWeights(WeightList &Weights) {
  // Use a hash table for many successors to keep this linear.
  if (Weights.size() > 128) {
    combineWeightsByHashing(Weights);
    return;
  }

  combineWeightsBySorting(Weights);
}

static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0);
  assert(Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}

void Distribution::normalize() {
  // Early exit for termination nodes.
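  // (A block that ends the function, e.g. with a return or an unreachable,
  // has no successors and therefore an empty weight list.)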
  if (Weights.empty())
    return;

  // Only bother if there are multiple successors.
  if (Weights.size() > 1)
    combineWeights(Weights);

  // Early exit when combined into a single successor.
  if (Weights.size() == 1) {
    Total = 1;
    Weights.front().Amount = 1;
    return;
  }

  // Determine how much to shift right so that the total fits into 32-bits.
  //
  // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
  // for each weight can cause a 32-bit overflow.
  int Shift = 0;
  if (DidOverflow)
    Shift = 33;
  else if (Total > UINT32_MAX)
    Shift = 33 - countLeadingZeros(Total);

  // Early exit if nothing needs to be scaled.
  if (!Shift) {
    // If we didn't overflow then combineWeights() shouldn't have changed the
    // sum of the weights, but let's double-check.
    assert(Total == std::accumulate(Weights.begin(), Weights.end(), UINT64_C(0),
                                    [](uint64_t Sum, const Weight &W) {
                                      return Sum + W.Amount;
                                    }) &&
           "Expected total to be correct");
    return;
  }

  // Recompute the total through accumulation (rather than shifting it) so that
  // it's accurate after shifting and any changes combineWeights() made above.
  Total = 0;

  // Sum the weights to each node and shift right if necessary.
  for (Weight &W : Weights) {
    // Scale down below UINT32_MAX. Since Shift is larger than necessary, we
    // can round here without concern about overflow.
    assert(W.TargetNode.isValid());
    W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
    assert(W.Amount <= UINT32_MAX);

    // Update the total.
    Total += W.Amount;
  }
  assert(Total <= UINT32_MAX);
}

void BlockFrequencyInfoImplBase::clear() {
  // Swap with a default-constructed std::vector, since std::vector<>::clear()
  // does not actually clear heap storage.
  std::vector<FrequencyData>().swap(Freqs);
  IsIrrLoopHeader.clear();
  std::vector<WorkingData>().swap(Working);
  Loops.clear();
}

/// Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs and
/// IsIrrLoopHeader.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
  std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
  SparseBitVector<> SavedIsIrrLoopHeader(std::move(BFI.IsIrrLoopHeader));
  BFI.clear();
  BFI.Freqs = std::move(SavedFreqs);
  BFI.IsIrrLoopHeader = std::move(SavedIsIrrLoopHeader);
}

bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
                                           const LoopData *OuterLoop,
                                           const BlockNode &Pred,
                                           const BlockNode &Succ,
                                           uint64_t Weight) {
  if (!Weight)
    Weight = 1;

  auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
    return OuterLoop && OuterLoop->isHeader(Node);
  };

  BlockNode Resolved = Working[Succ.Index].getResolvedNode();

#ifndef NDEBUG
  auto debugSuccessor = [&](const char *Type) {
    dbgs() << "  =>"
           << " [" << Type << "] weight = " << Weight;
    if (!isLoopHeader(Resolved))
      dbgs() << ", succ = " << getBlockName(Succ);
    if (Resolved != Succ)
      dbgs() << ", resolved = " << getBlockName(Resolved);
    dbgs() << "\n";
  };
  (void)debugSuccessor;
#endif

  if (isLoopHeader(Resolved)) {
    DEBUG(debugSuccessor("backedge"));
    Dist.addBackedge(Resolved, Weight);
    return true;
  }

  if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
    DEBUG(debugSuccessor("  exit  "));
    Dist.addExit(Resolved, Weight);
    return true;
  }

  if (Resolved < Pred) {
    if (!isLoopHeader(Pred)) {
      // If OuterLoop is an irreducible loop, we can't actually handle this.
      assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
             "unhandled irreducible control flow");

      // Irreducible backedge. Abort.
      DEBUG(debugSuccessor("abort!!!"));
      return false;
    }

    // If "Pred" is a loop header, then this isn't really a backedge; rather,
    // OuterLoop must be irreducible. These false backedges can come only from
    // secondary loop headers.
    assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
           "unhandled irreducible control flow");
  }

  DEBUG(debugSuccessor(" local  "));
  Dist.addLocal(Resolved, Weight);
  return true;
}

bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
    const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
  // Copy the exit map into Dist.
  for (const auto &I : Loop.Exits)
    if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
                   I.second.getMass()))
      // Irreducible backedge.
      return false;

  return true;
}

/// Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
  // Compute loop scale.
  DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");

  // Infinite loops need special handling. If we give the back edge an infinite
  // mass, they may saturate all the other scales in the function down to 1,
  // making all the other region temperatures look exactly the same. Choose an
  // arbitrary scale to avoid these issues.
  //
  // FIXME: An alternate way would be to select a symbolic scale which is later
  // replaced to be the maximum of all computed scales plus 1. This would
  // appropriately describe the loop as having a large scale, without skewing
  // the final frequency computation.
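  // (Scaled64(1, 12) denotes 1 * 2^12, so the scale below treats an infinite
  // loop as if its body executed 4096 times per entry.)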
  const Scaled64 InfiniteLoopScale(1, 12);

  // LoopScale == 1 / ExitMass
  // ExitMass == HeadMass - BackedgeMass
  BlockMass TotalBackedgeMass;
  for (auto &Mass : Loop.BackedgeMass)
    TotalBackedgeMass += Mass;
  BlockMass ExitMass = BlockMass::getFull() - TotalBackedgeMass;

  // Block scale stores the inverse of the scale. If this is an infinite loop,
  // its exit mass will be zero. In this case, use an arbitrary scale for the
  // loop scale.
  Loop.Scale =
      ExitMass.isEmpty() ? InfiniteLoopScale : ExitMass.toScaled().inverse();

  DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
               << " - " << TotalBackedgeMass << ")\n"
               << " - scale = " << Loop.Scale << "\n");
}

/// Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
  DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");

  // Clear the subloop exits to prevent quadratic memory usage.
  for (const BlockNode &M : Loop.Nodes) {
    if (auto *Loop = Working[M.Index].getPackagedLoop())
      Loop->Exits.clear();
    DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
  }
  Loop.IsPackaged = true;
}

#ifndef NDEBUG
static void debugAssign(const BlockFrequencyInfoImplBase &BFI,
                        const DitheringDistributer &D, const BlockNode &T,
                        const BlockMass &M, const char *Desc) {
  dbgs() << "  => assign " << M << " (" << D.RemMass << ")";
  if (Desc)
    dbgs() << " [" << Desc << "]";
  if (T.isValid())
    dbgs() << " to " << BFI.getBlockName(T);
  dbgs() << "\n";
}
#endif

void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
                                                LoopData *OuterLoop,
                                                Distribution &Dist) {
  BlockMass Mass = Working[Source.Index].getMass();
  DEBUG(dbgs() << "  => mass: " << Mass << "\n");

  // Distribute mass to successors as laid out in Dist.
  DitheringDistributer D(Dist, Mass);

  for (const Weight &W : Dist.Weights) {
    // Check for a local edge (non-backedge and non-exit).
    BlockMass Taken = D.takeMass(W.Amount);
    if (W.Type == Weight::Local) {
      Working[W.TargetNode.Index].getMass() += Taken;
      DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
      continue;
    }

    // Backedges and exits only make sense if we're processing a loop.
    assert(OuterLoop && "backedge or exit outside of loop");

    // Check for a backedge.
    if (W.Type == Weight::Backedge) {
      OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken;
      DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back"));
      continue;
    }

    // This must be an exit.
    assert(W.Type == Weight::Exit);
    OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
    DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit"));
  }
}

static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
                                     const Scaled64 &Min, const Scaled64 &Max) {
  // Scale the Factor to a size that creates integers. Ideally, integers would
  // be scaled so that Max == UINT64_MAX so that they can be best
  // differentiated. However, in the presence of large frequency values, small
  // frequencies are scaled down to 1, making it impossible to differentiate
  // small, unequal numbers. When the spread between Min and Max frequencies
  // fits well within MaxBits, we make the scale be at least 8.
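  //
  // For example (illustrative): with Min == 0.5 and Max == 1024, the spread
  // is 11 bits, so the factor below is Min.inverse() << 3 == 16, which maps
  // the minimum frequency to 8 while keeping Max well within 64 bits.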
  const unsigned MaxBits = 64;
  const unsigned SpreadBits = (Max / Min).lg();
  Scaled64 ScalingFactor;
  if (SpreadBits <= MaxBits - 3) {
    // If the values are small enough, make the scaling factor at least 8 to
    // allow distinguishing small values.
    ScalingFactor = Min.inverse();
    ScalingFactor <<= 3;
  } else {
    // If the values need more than MaxBits to be represented, saturate small
    // frequency values down to 1 by using a scaling factor that benefits large
    // frequency values.
    ScalingFactor = Scaled64(1, MaxBits) / Max;
  }

  // Translate the floats to integers.
  DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
               << ", factor = " << ScalingFactor << "\n");
  for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
    BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
    DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                 << ", int = " << BFI.Freqs[Index].Integer << "\n");
  }
}

/// Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
  DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
               << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
               << "\n");
  Loop.Scale *= Loop.Mass.toScaled();
  Loop.IsPackaged = false;
  DEBUG(dbgs() << "  => combined-scale = " << Loop.Scale << "\n");

  // Propagate the head scale through the loop. Since members are visited in
  // RPO, the head scale will be updated by the loop scale first, and then the
  // final head scale will be used to update the rest of the members.
  for (const BlockNode &N : Loop.Nodes) {
    const auto &Working = BFI.Working[N.Index];
    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
                                       : BFI.Freqs[N.Index].Scaled;
    Scaled64 New = Loop.Scale * F;
    DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                 << "\n");
    F = New;
  }
}

void BlockFrequencyInfoImplBase::unwrapLoops() {
  // Set initial frequencies from loop-local masses.
  for (size_t Index = 0; Index < Working.size(); ++Index)
    Freqs[Index].Scaled = Working[Index].Mass.toScaled();

  for (LoopData &Loop : Loops)
    unwrapLoop(*this, Loop);
}

void BlockFrequencyInfoImplBase::finalizeMetrics() {
  // Unwrap loop packages in reverse post-order, tracking min and max
  // frequencies.
  auto Min = Scaled64::getLargest();
  auto Max = Scaled64::getZero();
  for (size_t Index = 0; Index < Working.size(); ++Index) {
    // Update min/max scale.
    Min = std::min(Min, Freqs[Index].Scaled);
    Max = std::max(Max, Freqs[Index].Scaled);
  }

  // Convert to integers.
  convertFloatingToInteger(*this, Min, Max);

  // Clean up data structures.
  cleanup(*this);

  // Print out the final stats.
  DEBUG(dump());
}

BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return 0;
  return Freqs[Node.Index].Integer;
}

Optional<uint64_t>
BlockFrequencyInfoImplBase::getBlockProfileCount(const Function &F,
                                                 const BlockNode &Node) const {
  return getProfileCountFromFreq(F, getBlockFreq(Node).getFrequency());
}

Optional<uint64_t>
BlockFrequencyInfoImplBase::getProfileCountFromFreq(const Function &F,
                                                    uint64_t Freq) const {
  auto EntryCount = F.getEntryCount();
  if (!EntryCount)
    return None;
  // Use 128 bit APInt to do the arithmetic to avoid overflow.
  APInt BlockCount(128, EntryCount.getCount());
  APInt BlockFreq(128, Freq);
  APInt EntryFreq(128, getEntryFreq());
  BlockCount *= BlockFreq;
  BlockCount = BlockCount.udiv(EntryFreq);
  return BlockCount.getLimitedValue();
}

bool
BlockFrequencyInfoImplBase::isIrrLoopHeader(const BlockNode &Node) {
  if (!Node.isValid())
    return false;
  return IsIrrLoopHeader.test(Node.Index);
}

Scaled64
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return Scaled64::getZero();
  return Freqs[Node.Index].Scaled;
}

void BlockFrequencyInfoImplBase::setBlockFreq(const BlockNode &Node,
                                              uint64_t Freq) {
  assert(Node.isValid() && "Expected valid node");
  assert(Node.Index < Freqs.size() && "Expected legal index");
  Freqs[Node.Index].Integer = Freq;
}

std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
  return {};
}

std::string
BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
  return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
}
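// (Note: the base-class getBlockName() above intentionally returns an empty
// string; the templated BlockFrequencyInfoImpl<BT> subclass overrides it with
// real block names, so getLoopName() is only informative there.)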
"**" : "*"); 608 } 609 610 raw_ostream & 611 BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, 612 const BlockNode &Node) const { 613 return OS << getFloatingBlockFreq(Node); 614 } 615 616 raw_ostream & 617 BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, 618 const BlockFrequency &Freq) const { 619 Scaled64 Block(Freq.getFrequency(), 0); 620 Scaled64 Entry(getEntryFreq(), 0); 621 622 return OS << Block / Entry; 623 } 624 625 void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) { 626 Start = OuterLoop.getHeader(); 627 Nodes.reserve(OuterLoop.Nodes.size()); 628 for (auto N : OuterLoop.Nodes) 629 addNode(N); 630 indexNodes(); 631 } 632 633 void IrreducibleGraph::addNodesInFunction() { 634 Start = 0; 635 for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index) 636 if (!BFI.Working[Index].isPackaged()) 637 addNode(Index); 638 indexNodes(); 639 } 640 641 void IrreducibleGraph::indexNodes() { 642 for (auto &I : Nodes) 643 Lookup[I.Node.Index] = &I; 644 } 645 646 void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ, 647 const BFIBase::LoopData *OuterLoop) { 648 if (OuterLoop && OuterLoop->isHeader(Succ)) 649 return; 650 auto L = Lookup.find(Succ.Index); 651 if (L == Lookup.end()) 652 return; 653 IrrNode &SuccIrr = *L->second; 654 Irr.Edges.push_back(&SuccIrr); 655 SuccIrr.Edges.push_front(&Irr); 656 ++SuccIrr.NumIn; 657 } 658 659 namespace llvm { 660 661 template <> struct GraphTraits<IrreducibleGraph> { 662 using GraphT = bfi_detail::IrreducibleGraph; 663 using NodeRef = const GraphT::IrrNode *; 664 using ChildIteratorType = GraphT::IrrNode::iterator; 665 666 static NodeRef getEntryNode(const GraphT &G) { return G.StartIrr; } 667 static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); } 668 static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); } 669 }; 670 671 } // end namespace llvm 672 673 /// Find extra irreducible headers. 674 /// 675 /// Find entry blocks and other blocks with backedges, which exist when \c G 676 /// contains irreducible sub-SCCs. 677 static void findIrreducibleHeaders( 678 const BlockFrequencyInfoImplBase &BFI, 679 const IrreducibleGraph &G, 680 const std::vector<const IrreducibleGraph::IrrNode *> &SCC, 681 LoopData::NodeList &Headers, LoopData::NodeList &Others) { 682 // Map from nodes in the SCC to whether it's an entry block. 683 SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC; 684 685 // InSCC also acts the set of nodes in the graph. Seed it. 686 for (const auto *I : SCC) 687 InSCC[I] = false; 688 689 for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) { 690 auto &Irr = *I->first; 691 for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { 692 if (InSCC.count(P)) 693 continue; 694 695 // This is an entry block. 696 I->second = true; 697 Headers.push_back(Irr.Node); 698 DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n"); 699 break; 700 } 701 } 702 assert(Headers.size() >= 2 && 703 "Expected irreducible CFG; -loop-info is likely invalid"); 704 if (Headers.size() == InSCC.size()) { 705 // Every block is a header. 706 llvm::sort(Headers.begin(), Headers.end()); 707 return; 708 } 709 710 // Look for extra headers from irreducible sub-SCCs. 711 for (const auto &I : InSCC) { 712 // Entry blocks are already headers. 713 if (I.second) 714 continue; 715 716 auto &Irr = *I.first; 717 for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { 718 // Skip forward edges. 
      if (P->Node < Irr.Node)
        continue;

      // Skip predecessors from entry blocks. These can have inverted
      // ordering.
      if (InSCC.lookup(P))
        continue;

      // Store the extra header.
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << "  => extra = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
    if (Headers.back() == Irr.Node)
      // Added this as a header.
      continue;

    // This is not a header.
    Others.push_back(Irr.Node);
    DEBUG(dbgs() << "  => other = " << BFI.getBlockName(Irr.Node) << "\n");
  }
  llvm::sort(Headers.begin(), Headers.end());
  llvm::sort(Others.begin(), Others.end());
}

static void createIrreducibleLoop(
    BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
  // Translate the SCC into RPO.
  DEBUG(dbgs() << " - found-scc\n");

  LoopData::NodeList Headers;
  LoopData::NodeList Others;
  findIrreducibleHeaders(BFI, G, SCC, Headers, Others);

  auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
                                Headers.end(), Others.begin(), Others.end());

  // Update loop hierarchy.
  for (const auto &N : Loop->Nodes)
    if (BFI.Working[N.Index].isLoopHeader())
      BFI.Working[N.Index].Loop->Parent = &*Loop;
    else
      BFI.Working[N.Index].Loop = &*Loop;
}

iterator_range<std::list<LoopData>::iterator>
BlockFrequencyInfoImplBase::analyzeIrreducible(
    const IrreducibleGraph &G, LoopData *OuterLoop,
    std::list<LoopData>::iterator Insert) {
  assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
  auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();

  for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
    if (I->size() < 2)
      continue;

    // Translate the SCC into RPO.
    createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
  }

  if (OuterLoop)
    return make_range(std::next(Prev), Insert);
  return make_range(Loops.begin(), Insert);
}

void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
  OuterLoop.Exits.clear();
  for (auto &Mass : OuterLoop.BackedgeMass)
    Mass = BlockMass::getEmpty();
  auto O = OuterLoop.Nodes.begin() + 1;
  for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
    if (!Working[I->Index].isPackaged())
      *O++ = *I;
  OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}

void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) {
  assert(Loop.isIrreducible() && "this only makes sense on irreducible loops");

  // Since the loop has more than one header block, the mass flowing back into
  // each header will be different. Adjust the mass in each header loop to
  // reflect the masses flowing through back edges.
  //
  // To do this, we distribute the initial mass using the backedge masses
  // as weights for the distribution.
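  //
  // For example (illustrative): if two headers receive backedge masses in a
  // 3:1 ratio, the first is seeded with 3/4 of the full mass and the second
  // with the remaining 1/4.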
  BlockMass LoopMass = BlockMass::getFull();
  Distribution Dist;

  DEBUG(dbgs() << "adjust-loop-header-mass:\n");
  for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
    auto &HeaderNode = Loop.Nodes[H];
    auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)];
    DEBUG(dbgs() << " - Add back edge mass for node "
                 << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n");
    if (BackedgeMass.getMass() > 0)
      Dist.addLocal(HeaderNode, BackedgeMass.getMass());
    else
      DEBUG(dbgs() << " Nothing added. Back edge mass is zero\n");
  }

  DitheringDistributer D(Dist, LoopMass);

  DEBUG(dbgs() << " Distribute loop mass " << LoopMass
               << " to headers using above weights\n");
  for (const Weight &W : Dist.Weights) {
    BlockMass Taken = D.takeMass(W.Amount);
    assert(W.Type == Weight::Local && "all weights should be local");
    Working[W.TargetNode.Index].getMass() = Taken;
    DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
  }
}

void BlockFrequencyInfoImplBase::distributeIrrLoopHeaderMass(
    Distribution &Dist) {
  BlockMass LoopMass = BlockMass::getFull();
  DitheringDistributer D(Dist, LoopMass);
  for (const Weight &W : Dist.Weights) {
    BlockMass Taken = D.takeMass(W.Amount);
    assert(W.Type == Weight::Local && "all weights should be local");
    Working[W.TargetNode.Index].getMass() = Taken;
    DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
  }
}