//===- CodeLayout.cpp - Implementation of code layout algorithms ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ExtTSP - layout of basic blocks with i-cache optimization.
//
// The algorithm tries to find a layout of nodes (basic blocks) of a given CFG
// optimizing jump locality and thus processor I-cache utilization. This is
// achieved via increasing the number of fall-through jumps and co-locating
// frequently executed nodes together. The name follows the underlying
// optimization problem, Extended-TSP, which is a generalization of the
// classical (maximum) Traveling Salesman Problem.
//
// The algorithm is a greedy heuristic that works with chains (ordered lists)
// of basic blocks. Initially all chains are isolated basic blocks. On every
// iteration, we pick a pair of chains whose merging yields the biggest increase
// in the ExtTSP score, which models how i-cache "friendly" a specific chain is.
// The pair of chains giving the maximum gain is merged into a new chain. The
// procedure stops when there is only one chain left, or when merging does not
// increase ExtTSP. In the latter case, the remaining chains are sorted by
// density in decreasing order.
//
// An important aspect is the way two chains are merged. Unlike earlier
// algorithms (e.g., ones based on the approach of Pettis-Hansen), two
// chains, X and Y, are first split into three, X1, X2, and Y. Then we
// consider all possible ways of gluing the three chains (e.g., X1YX2, X1X2Y,
// X2X1Y, X2YX1, YX1X2, YX2X1) and choose the one producing the largest score.
// This improves the quality of the final result (the search space is larger)
// while keeping the implementation sufficiently fast.
//
// Reference:
//  * A. Newell and S. Pupyrev, Improved Basic Block Reordering,
//    IEEE Transactions on Computers, 2020
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/CodeLayout.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
#define DEBUG_TYPE "code-layout"

cl::opt<bool> EnableExtTspBlockPlacement(
    "enable-ext-tsp-block-placement", cl::Hidden, cl::init(false),
    cl::desc("Enable machine block placement based on the ext-tsp model, "
             "optimizing I-cache utilization."));

// Algorithm-specific constants. The values are tuned for the best performance
// of large-scale front-end bound binaries.
static cl::opt<double>
    ForwardWeight("ext-tsp-forward-weight", cl::Hidden, cl::init(0.1),
                  cl::desc("The weight of forward jumps for ExtTSP value"));

static cl::opt<double>
    BackwardWeight("ext-tsp-backward-weight", cl::Hidden, cl::init(0.1),
                   cl::desc("The weight of backward jumps for ExtTSP value"));

static cl::opt<unsigned> ForwardDistance(
    "ext-tsp-forward-distance", cl::Hidden, cl::init(1024),
    cl::desc("The maximum distance (in bytes) of a forward jump for ExtTSP"));

static cl::opt<unsigned> BackwardDistance(
    "ext-tsp-backward-distance", cl::Hidden, cl::init(640),
    cl::desc("The maximum distance (in bytes) of a backward jump for ExtTSP"));

// The maximum size of a chain for splitting. Larger values of the threshold
// may yield better quality at the cost of worse run-time.
static cl::opt<unsigned> ChainSplitThreshold(
    "ext-tsp-chain-split-threshold", cl::Hidden, cl::init(128),
    cl::desc("The maximum size of a chain to apply splitting"));

// The option enables splitting (large) chains along in-coming and out-going
// jumps. This typically results in a better quality.
static cl::opt<bool> EnableChainSplitAlongJumps(
    "ext-tsp-enable-chain-split-along-jumps", cl::Hidden, cl::init(true),
    cl::desc("The option enables splitting chains along incoming and "
             "outgoing jumps"));

namespace {

// Epsilon for comparison of doubles.
constexpr double EPS = 1e-8;

// Compute the Ext-TSP score for a jump between a given pair of blocks,
// using their sizes, (estimated) addresses and the jump execution count.
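// Specifically, a jump with execution count Count contributes to the score:
//  * Count, if the jump is a fall-through (SrcAddr + SrcSize == DstAddr);
//  * ForwardWeight * (1.0 - Dist / ForwardDistance) * Count, for a forward
//    jump with distance Dist = DstAddr - (SrcAddr + SrcSize) not exceeding
//    ForwardDistance;
//  * BackwardWeight * (1.0 - Dist / BackwardDistance) * Count, for a backward
//    jump with distance Dist = SrcAddr + SrcSize - DstAddr not exceeding
//    BackwardDistance;
//  * 0, otherwise.
// (This merely restates the logic of the function below for quick reference.)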
double extTSPScore(uint64_t SrcAddr, uint64_t SrcSize, uint64_t DstAddr,
                   uint64_t Count) {
  // Fallthrough
  if (SrcAddr + SrcSize == DstAddr) {
    // Assume that FallthroughWeight = 1.0 after normalization
    return static_cast<double>(Count);
  }
  // Forward
  if (SrcAddr + SrcSize < DstAddr) {
    const auto Dist = DstAddr - (SrcAddr + SrcSize);
    if (Dist <= ForwardDistance) {
      double Prob = 1.0 - static_cast<double>(Dist) / ForwardDistance;
      return ForwardWeight * Prob * Count;
    }
    return 0;
  }
  // Backward
  const auto Dist = SrcAddr + SrcSize - DstAddr;
  if (Dist <= BackwardDistance) {
    double Prob = 1.0 - static_cast<double>(Dist) / BackwardDistance;
    return BackwardWeight * Prob * Count;
  }
  return 0;
}

/// A type of merging two chains, X and Y. The former chain is split into
/// X1 and X2 and then concatenated with Y in the order specified by the type.
enum class MergeTypeTy : int { X_Y, X1_Y_X2, Y_X2_X1, X2_X1_Y };

/// The gain of merging two chains, that is, the Ext-TSP score of the merge
/// together with the corresponding merge 'type' and 'offset'.
class MergeGainTy {
public:
  explicit MergeGainTy() = default;
  explicit MergeGainTy(double Score, size_t MergeOffset, MergeTypeTy MergeType)
      : Score(Score), MergeOffset(MergeOffset), MergeType(MergeType) {}

  double score() const { return Score; }

  size_t mergeOffset() const { return MergeOffset; }

  MergeTypeTy mergeType() const { return MergeType; }

  // Returns 'true' iff Other is preferred over this.
  bool operator<(const MergeGainTy &Other) const {
    return (Other.Score > EPS && Other.Score > Score + EPS);
  }

  // Update the current gain if Other is preferred over this.
  void updateIfLessThan(const MergeGainTy &Other) {
    if (*this < Other)
      *this = Other;
  }

private:
  double Score{-1.0};
  size_t MergeOffset{0};
  MergeTypeTy MergeType{MergeTypeTy::X_Y};
};

class Jump;
class Chain;
class ChainEdge;

/// A node in the graph, typically corresponding to a basic block in CFG.
class Block {
public:
  Block(const Block &) = delete;
  Block(Block &&) = default;
  Block &operator=(const Block &) = delete;
  Block &operator=(Block &&) = default;

  // The original index of the block in CFG.
  size_t Index{0};
  // The index of the block in the current chain.
  size_t CurIndex{0};
  // Size of the block in the binary.
  uint64_t Size{0};
  // Execution count of the block in the profile data.
  uint64_t ExecutionCount{0};
  // Current chain of the node.
  Chain *CurChain{nullptr};
  // The estimated address (offset) of the block in the current chain.
  mutable uint64_t EstimatedAddr{0};
  // Forced successor of the block in CFG.
  Block *ForcedSucc{nullptr};
  // Forced predecessor of the block in CFG.
  Block *ForcedPred{nullptr};
  // Outgoing jumps from the block.
  std::vector<Jump *> OutJumps;
  // Incoming jumps to the block.
  std::vector<Jump *> InJumps;

public:
  explicit Block(size_t Index, uint64_t Size_, uint64_t EC)
      : Index(Index), Size(Size_), ExecutionCount(EC) {}
  bool isEntry() const { return Index == 0; }
};

/// An arc in the graph, typically corresponding to a jump between two blocks.
class Jump {
public:
  Jump(const Jump &) = delete;
  Jump(Jump &&) = default;
  Jump &operator=(const Jump &) = delete;
  Jump &operator=(Jump &&) = default;

  // Source block of the jump.
  Block *Source;
  // Target block of the jump.
  Block *Target;
  // Execution count of the arc in the profile data.
  uint64_t ExecutionCount{0};

public:
  explicit Jump(Block *Source, Block *Target, uint64_t ExecutionCount)
      : Source(Source), Target(Target), ExecutionCount(ExecutionCount) {}
};

/// A chain (ordered sequence) of blocks.
class Chain {
public:
  Chain(const Chain &) = delete;
  Chain(Chain &&) = default;
  Chain &operator=(const Chain &) = delete;
  Chain &operator=(Chain &&) = default;

  explicit Chain(uint64_t Id, Block *Block)
      : Id(Id), Score(0), Blocks(1, Block) {}

  uint64_t id() const { return Id; }

  bool isEntry() const { return Blocks[0]->Index == 0; }

  double score() const { return Score; }

  void setScore(double NewScore) { Score = NewScore; }

  const std::vector<Block *> &blocks() const { return Blocks; }

  const std::vector<std::pair<Chain *, ChainEdge *>> &edges() const {
    return Edges;
  }

  ChainEdge *getEdge(Chain *Other) const {
    for (auto It : Edges) {
      if (It.first == Other)
        return It.second;
    }
    return nullptr;
  }

  void removeEdge(Chain *Other) {
    auto It = Edges.begin();
    while (It != Edges.end()) {
      if (It->first == Other) {
        Edges.erase(It);
        return;
      }
      It++;
    }
  }

  void addEdge(Chain *Other, ChainEdge *Edge) {
    Edges.push_back(std::make_pair(Other, Edge));
  }

  void merge(Chain *Other, const std::vector<Block *> &MergedBlocks) {
    Blocks = MergedBlocks;
    // Update the block's chains
    for (size_t Idx = 0; Idx < Blocks.size(); Idx++) {
      Blocks[Idx]->CurChain = this;
      Blocks[Idx]->CurIndex = Idx;
    }
  }

  void mergeEdges(Chain *Other);

  void clear() {
    Blocks.clear();
    Blocks.shrink_to_fit();
    Edges.clear();
    Edges.shrink_to_fit();
  }

private:
  // Unique chain identifier.
  uint64_t Id;
  // Cached ext-tsp score for the chain.
  double Score;
  // Blocks of the chain.
  std::vector<Block *> Blocks;
  // Adjacent chains and corresponding edges (lists of jumps).
  std::vector<std::pair<Chain *, ChainEdge *>> Edges;
};

/// An edge in CFG representing jumps between two chains.
/// When blocks are merged into chains, the edges are combined too so that
/// there is always at most one edge between a pair of chains.
class ChainEdge {
public:
  ChainEdge(const ChainEdge &) = delete;
  ChainEdge(ChainEdge &&) = default;
  ChainEdge &operator=(const ChainEdge &) = delete;
  ChainEdge &operator=(ChainEdge &&) = default;

  explicit ChainEdge(Jump *Jump)
      : SrcChain(Jump->Source->CurChain), DstChain(Jump->Target->CurChain),
        Jumps(1, Jump) {}

  const std::vector<Jump *> &jumps() const { return Jumps; }

  void changeEndpoint(Chain *From, Chain *To) {
    if (From == SrcChain)
      SrcChain = To;
    if (From == DstChain)
      DstChain = To;
  }

  void appendJump(Jump *Jump) { Jumps.push_back(Jump); }

  void moveJumps(ChainEdge *Other) {
    Jumps.insert(Jumps.end(), Other->Jumps.begin(), Other->Jumps.end());
    Other->Jumps.clear();
    Other->Jumps.shrink_to_fit();
  }

  bool hasCachedMergeGain(Chain *Src, Chain *Dst) const {
    return Src == SrcChain ? CacheValidForward : CacheValidBackward;
  }

  MergeGainTy getCachedMergeGain(Chain *Src, Chain *Dst) const {
    return Src == SrcChain ? CachedGainForward : CachedGainBackward;
  }

  void setCachedMergeGain(Chain *Src, Chain *Dst, MergeGainTy MergeGain) {
    if (Src == SrcChain) {
      CachedGainForward = MergeGain;
      CacheValidForward = true;
    } else {
      CachedGainBackward = MergeGain;
      CacheValidBackward = true;
    }
  }

  void invalidateCache() {
    CacheValidForward = false;
    CacheValidBackward = false;
  }

private:
  // Source chain.
  Chain *SrcChain{nullptr};
  // Destination chain.
  Chain *DstChain{nullptr};
  // Original jumps in the binary with corresponding execution counts.
  std::vector<Jump *> Jumps;
  // Cached ext-tsp value for merging the pair of chains.
  // Since the gain of merging (Src, Dst) and (Dst, Src) might be different,
  // we store both values here.
  MergeGainTy CachedGainForward;
  MergeGainTy CachedGainBackward;
  // Whether the cached value must be recomputed.
  bool CacheValidForward{false};
  bool CacheValidBackward{false};
};

void Chain::mergeEdges(Chain *Other) {
  assert(this != Other && "cannot merge a chain with itself");

  // Update edges adjacent to chain Other
  for (auto EdgeIt : Other->Edges) {
    const auto DstChain = EdgeIt.first;
    const auto DstEdge = EdgeIt.second;
    const auto TargetChain = DstChain == Other ? this : DstChain;
    auto CurEdge = getEdge(TargetChain);
    if (CurEdge == nullptr) {
      DstEdge->changeEndpoint(Other, this);
      this->addEdge(TargetChain, DstEdge);
      if (DstChain != this && DstChain != Other) {
        DstChain->addEdge(this, DstEdge);
      }
    } else {
      CurEdge->moveJumps(DstEdge);
    }
    // Cleanup leftover edge
    if (DstChain != Other) {
      DstChain->removeEdge(Other);
    }
  }
}

using BlockIter = std::vector<Block *>::const_iterator;

/// A wrapper around three chains of blocks; it is used to avoid extra
/// instantiation of the vectors.
class MergedChain {
public:
  MergedChain(BlockIter Begin1, BlockIter End1, BlockIter Begin2 = BlockIter(),
              BlockIter End2 = BlockIter(), BlockIter Begin3 = BlockIter(),
              BlockIter End3 = BlockIter())
      : Begin1(Begin1), End1(End1), Begin2(Begin2), End2(End2), Begin3(Begin3),
        End3(End3) {}

  template <typename F> void forEach(const F &Func) const {
    for (auto It = Begin1; It != End1; It++)
      Func(*It);
    for (auto It = Begin2; It != End2; It++)
      Func(*It);
    for (auto It = Begin3; It != End3; It++)
      Func(*It);
  }

  std::vector<Block *> getBlocks() const {
    std::vector<Block *> Result;
    Result.reserve(std::distance(Begin1, End1) + std::distance(Begin2, End2) +
                   std::distance(Begin3, End3));
    Result.insert(Result.end(), Begin1, End1);
    Result.insert(Result.end(), Begin2, End2);
    Result.insert(Result.end(), Begin3, End3);
    return Result;
  }

  const Block *getFirstBlock() const { return *Begin1; }

private:
  BlockIter Begin1;
  BlockIter End1;
  BlockIter Begin2;
  BlockIter End2;
  BlockIter Begin3;
  BlockIter End3;
};

/// The implementation of the ExtTSP algorithm.
class ExtTSPImpl {
  using EdgeT = std::pair<uint64_t, uint64_t>;
  using EdgeCountMap = DenseMap<EdgeT, uint64_t>;

public:
  ExtTSPImpl(size_t NumNodes, const std::vector<uint64_t> &NodeSizes,
             const std::vector<uint64_t> &NodeCounts,
             const EdgeCountMap &EdgeCounts)
      : NumNodes(NumNodes) {
    initialize(NodeSizes, NodeCounts, EdgeCounts);
  }

  /// Run the algorithm and return an optimized ordering of blocks.
  void run(std::vector<uint64_t> &Result) {
    // Pass 1: Merge blocks with their mutually forced successors
    mergeForcedPairs();

    // Pass 2: Merge pairs of chains while improving the ExtTSP objective
    mergeChainPairs();

    // Pass 3: Merge cold blocks to reduce code size
    mergeColdChains();

    // Collect blocks from all chains
    concatChains(Result);
  }

private:
  /// Initialize the algorithm's data structures.
  void initialize(const std::vector<uint64_t> &NodeSizes,
                  const std::vector<uint64_t> &NodeCounts,
                  const EdgeCountMap &EdgeCounts) {
    // Initialize blocks
    AllBlocks.reserve(NumNodes);
    for (uint64_t Node = 0; Node < NumNodes; Node++) {
      uint64_t Size = std::max<uint64_t>(NodeSizes[Node], 1ULL);
      uint64_t ExecutionCount = NodeCounts[Node];
      // The execution count of the entry block is set to at least 1
      if (Node == 0 && ExecutionCount == 0)
        ExecutionCount = 1;
      AllBlocks.emplace_back(Node, Size, ExecutionCount);
    }

    // Initialize jumps between blocks
    SuccNodes = std::vector<std::vector<uint64_t>>(NumNodes);
    PredNodes = std::vector<std::vector<uint64_t>>(NumNodes);
    AllJumps.reserve(EdgeCounts.size());
    for (auto It : EdgeCounts) {
      auto Pred = It.first.first;
      auto Succ = It.first.second;
      // Ignore self-edges
      if (Pred == Succ)
        continue;

      SuccNodes[Pred].push_back(Succ);
      PredNodes[Succ].push_back(Pred);
      auto ExecutionCount = It.second;
      if (ExecutionCount > 0) {
        auto &Block = AllBlocks[Pred];
        auto &SuccBlock = AllBlocks[Succ];
        AllJumps.emplace_back(&Block, &SuccBlock, ExecutionCount);
        SuccBlock.InJumps.push_back(&AllJumps.back());
        Block.OutJumps.push_back(&AllJumps.back());
      }
    }

    // Initialize chains
    AllChains.reserve(NumNodes);
    HotChains.reserve(NumNodes);
    for (auto &Block : AllBlocks) {
      AllChains.emplace_back(Block.Index, &Block);
      Block.CurChain = &AllChains.back();
      if (Block.ExecutionCount > 0) {
        HotChains.push_back(&AllChains.back());
      }
    }

    // Initialize chain edges
    AllEdges.reserve(AllJumps.size());
    for (auto &Block : AllBlocks) {
      for (auto &Jump : Block.OutJumps) {
        const auto SuccBlock = Jump->Target;
        auto CurEdge = Block.CurChain->getEdge(SuccBlock->CurChain);
        // this edge is already present in the graph
        if (CurEdge != nullptr) {
          assert(SuccBlock->CurChain->getEdge(Block.CurChain) != nullptr);
          CurEdge->appendJump(Jump);
          continue;
        }
        // this is a new edge
        AllEdges.emplace_back(Jump);
        Block.CurChain->addEdge(SuccBlock->CurChain, &AllEdges.back());
        SuccBlock->CurChain->addEdge(Block.CurChain, &AllEdges.back());
      }
    }
  }

  /// For a pair of blocks, A and B, block B is the forced successor of A,
  /// if (i) all jumps (based on profile) from A go to B and (ii) all jumps
  /// to B are from A. Such blocks should be adjacent in the optimal ordering;
  /// the method finds and merges such pairs of blocks.
  void mergeForcedPairs() {
    // Find fallthroughs based on edge weights
    for (auto &Block : AllBlocks) {
      if (SuccNodes[Block.Index].size() == 1 &&
          PredNodes[SuccNodes[Block.Index][0]].size() == 1 &&
          SuccNodes[Block.Index][0] != 0) {
        size_t SuccIndex = SuccNodes[Block.Index][0];
        Block.ForcedSucc = &AllBlocks[SuccIndex];
        AllBlocks[SuccIndex].ForcedPred = &Block;
      }
    }

    // There might be 'cycles' in the forced dependencies, since profile
    // data isn't 100% accurate. Typically this is observed in loops, when the
    // loop edges are the hottest successors for the basic blocks of the loop.
    // Break the cycles by choosing the block with the smallest index as the
    // head. This helps to keep the original order of the loops, which likely
    // have already been rotated in an optimized manner.
    for (auto &Block : AllBlocks) {
      if (Block.ForcedSucc == nullptr || Block.ForcedPred == nullptr)
        continue;

      auto SuccBlock = Block.ForcedSucc;
      while (SuccBlock != nullptr && SuccBlock != &Block) {
        SuccBlock = SuccBlock->ForcedSucc;
      }
      if (SuccBlock == nullptr)
        continue;
      // Break the cycle
      AllBlocks[Block.ForcedPred->Index].ForcedSucc = nullptr;
      Block.ForcedPred = nullptr;
    }

    // Merge blocks with their fallthrough successors
    for (auto &Block : AllBlocks) {
      if (Block.ForcedPred == nullptr && Block.ForcedSucc != nullptr) {
        auto CurBlock = &Block;
        while (CurBlock->ForcedSucc != nullptr) {
          const auto NextBlock = CurBlock->ForcedSucc;
          mergeChains(Block.CurChain, NextBlock->CurChain, 0, MergeTypeTy::X_Y);
          CurBlock = NextBlock;
        }
      }
    }
  }

  /// Merge pairs of chains while improving the ExtTSP objective.
  void mergeChainPairs() {
    /// Deterministically compare pairs of chains
    auto compareChainPairs = [](const Chain *A1, const Chain *B1,
                                const Chain *A2, const Chain *B2) {
      if (A1 != A2)
        return A1->id() < A2->id();
      return B1->id() < B2->id();
    };

    while (HotChains.size() > 1) {
      Chain *BestChainPred = nullptr;
      Chain *BestChainSucc = nullptr;
      auto BestGain = MergeGainTy();
      // Iterate over all pairs of chains
      for (auto ChainPred : HotChains) {
        // Get candidates for merging with the current chain
        for (auto EdgeIter : ChainPred->edges()) {
          auto ChainSucc = EdgeIter.first;
          auto ChainEdge = EdgeIter.second;
          // Ignore loop edges
          if (ChainPred == ChainSucc)
            continue;

          // Compute the gain of merging the two chains
          auto CurGain = getBestMergeGain(ChainPred, ChainSucc, ChainEdge);
          if (CurGain.score() <= EPS)
            continue;

          if (BestGain < CurGain ||
              (std::abs(CurGain.score() - BestGain.score()) < EPS &&
               compareChainPairs(ChainPred, ChainSucc, BestChainPred,
                                 BestChainSucc))) {
            BestGain = CurGain;
            BestChainPred = ChainPred;
            BestChainSucc = ChainSucc;
          }
        }
      }

      // Stop merging when there is no improvement
      if (BestGain.score() <= EPS)
        break;

      // Merge the best pair of chains
      mergeChains(BestChainPred, BestChainSucc, BestGain.mergeOffset(),
                  BestGain.mergeType());
    }
  }

  /// Merge cold blocks to reduce code size.
  void mergeColdChains() {
    for (size_t SrcBB = 0; SrcBB < NumNodes; SrcBB++) {
      // Iterating over neighbors in the reverse order to make sure original
      // fallthrough jumps are merged first
      size_t NumSuccs = SuccNodes[SrcBB].size();
      for (size_t Idx = 0; Idx < NumSuccs; Idx++) {
        auto DstBB = SuccNodes[SrcBB][NumSuccs - Idx - 1];
        auto SrcChain = AllBlocks[SrcBB].CurChain;
        auto DstChain = AllBlocks[DstBB].CurChain;
        if (SrcChain != DstChain && !DstChain->isEntry() &&
            SrcChain->blocks().back()->Index == SrcBB &&
            DstChain->blocks().front()->Index == DstBB) {
          mergeChains(SrcChain, DstChain, 0, MergeTypeTy::X_Y);
        }
      }
    }
  }

  /// Compute the Ext-TSP score for a given block order and a list of jumps.
  double extTSPScore(const MergedChain &MergedBlocks,
                     const std::vector<Jump *> &Jumps) const {
    if (Jumps.empty())
      return 0.0;
    uint64_t CurAddr = 0;
    MergedBlocks.forEach([&](const Block *BB) {
      BB->EstimatedAddr = CurAddr;
      CurAddr += BB->Size;
    });

    double Score = 0;
    for (auto &Jump : Jumps) {
      const auto SrcBlock = Jump->Source;
      const auto DstBlock = Jump->Target;
      Score += ::extTSPScore(SrcBlock->EstimatedAddr, SrcBlock->Size,
                             DstBlock->EstimatedAddr, Jump->ExecutionCount);
    }
    return Score;
  }

  /// Compute the gain of merging two chains.
  ///
  /// The function considers all possible ways of merging two chains and
  /// computes the one having the largest increase in ExtTSP objective. The
  /// result contains the gain together with the corresponding merge 'type'
  /// and 'offset'.
  MergeGainTy getBestMergeGain(Chain *ChainPred, Chain *ChainSucc,
                               ChainEdge *Edge) const {
    if (Edge->hasCachedMergeGain(ChainPred, ChainSucc)) {
      return Edge->getCachedMergeGain(ChainPred, ChainSucc);
    }

    // Precompute jumps between ChainPred and ChainSucc
    auto Jumps = Edge->jumps();
    auto EdgePP = ChainPred->getEdge(ChainPred);
    if (EdgePP != nullptr) {
      Jumps.insert(Jumps.end(), EdgePP->jumps().begin(), EdgePP->jumps().end());
    }
    assert(!Jumps.empty() && "trying to merge chains w/o jumps");

    // This object holds the best currently chosen gain of merging the two
    // chains
    MergeGainTy Gain = MergeGainTy();

    /// Given a merge offset and a list of merge types, try to merge two chains
    /// and update Gain with a better alternative
    auto tryChainMerging = [&](size_t Offset,
                               const std::vector<MergeTypeTy> &MergeTypes) {
      // Skip merging corresponding to concatenation w/o splitting
      if (Offset == 0 || Offset == ChainPred->blocks().size())
        return;
      // Skip merging if it breaks Forced successors
      auto BB = ChainPred->blocks()[Offset - 1];
      if (BB->ForcedSucc != nullptr)
        return;
      // Apply the merge, compute the corresponding gain, and update the best
      // value, if the merge is beneficial
      for (auto &MergeType : MergeTypes) {
        Gain.updateIfLessThan(
            computeMergeGain(ChainPred, ChainSucc, Jumps, Offset, MergeType));
      }
    };

    // Try to concatenate two chains w/o splitting
    Gain.updateIfLessThan(
        computeMergeGain(ChainPred, ChainSucc, Jumps, 0, MergeTypeTy::X_Y));

    if (EnableChainSplitAlongJumps) {
      // Attach (a part of) ChainPred before the first block of ChainSucc
      for (auto &Jump : ChainSucc->blocks().front()->InJumps) {
        const auto SrcBlock = Jump->Source;
        if (SrcBlock->CurChain != ChainPred)
          continue;
        size_t Offset = SrcBlock->CurIndex + 1;
        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::X2_X1_Y});
      }

      // Attach (a part of) ChainPred after the last block of ChainSucc
      for (auto &Jump : ChainSucc->blocks().back()->OutJumps) {
        const auto DstBlock = Jump->Target;
        if (DstBlock->CurChain != ChainPred)
          continue;
        size_t Offset = DstBlock->CurIndex;
        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::Y_X2_X1});
      }
    }

    // Try to break ChainPred in various ways and concatenate with ChainSucc
    if (ChainPred->blocks().size() <= ChainSplitThreshold) {
      for (size_t Offset = 1; Offset < ChainPred->blocks().size(); Offset++) {
        // Try to split the chain in different ways. In practice, applying
        // X2_Y_X1 merging almost never provides benefits; thus, we exclude
        // it from consideration to reduce the search space.
        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::Y_X2_X1,
                                 MergeTypeTy::X2_X1_Y});
      }
    }
    Edge->setCachedMergeGain(ChainPred, ChainSucc, Gain);
    return Gain;
  }

  /// Compute the score gain of merging two chains, respecting a given
  /// merge 'type' and 'offset'.
  ///
  /// The two chains are not modified in the method.
  MergeGainTy computeMergeGain(const Chain *ChainPred, const Chain *ChainSucc,
                               const std::vector<Jump *> &Jumps,
                               size_t MergeOffset,
                               MergeTypeTy MergeType) const {
    auto MergedBlocks = mergeBlocks(ChainPred->blocks(), ChainSucc->blocks(),
                                    MergeOffset, MergeType);

    // Do not allow a merge that does not preserve the original entry block
    if ((ChainPred->isEntry() || ChainSucc->isEntry()) &&
        !MergedBlocks.getFirstBlock()->isEntry())
      return MergeGainTy();

    // The gain for the new chain
    auto NewGainScore = extTSPScore(MergedBlocks, Jumps) - ChainPred->score();
    return MergeGainTy(NewGainScore, MergeOffset, MergeType);
  }

  /// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
  ///
  /// If MergeType == X_Y, then the result is a concatenation of two chains.
  /// Otherwise, the first chain is cut into two sub-chains at the offset,
  /// and merged using one of the possible ways of concatenating three chains.
  MergedChain mergeBlocks(const std::vector<Block *> &X,
                          const std::vector<Block *> &Y, size_t MergeOffset,
                          MergeTypeTy MergeType) const {
    // Split the first chain, X, into X1 and X2
    BlockIter BeginX1 = X.begin();
    BlockIter EndX1 = X.begin() + MergeOffset;
    BlockIter BeginX2 = X.begin() + MergeOffset;
    BlockIter EndX2 = X.end();
    BlockIter BeginY = Y.begin();
    BlockIter EndY = Y.end();

    // Construct a new chain from the three existing ones
    switch (MergeType) {
    case MergeTypeTy::X_Y:
      return MergedChain(BeginX1, EndX2, BeginY, EndY);
    case MergeTypeTy::X1_Y_X2:
      return MergedChain(BeginX1, EndX1, BeginY, EndY, BeginX2, EndX2);
    case MergeTypeTy::Y_X2_X1:
      return MergedChain(BeginY, EndY, BeginX2, EndX2, BeginX1, EndX1);
    case MergeTypeTy::X2_X1_Y:
      return MergedChain(BeginX2, EndX2, BeginX1, EndX1, BeginY, EndY);
    }
    llvm_unreachable("unexpected chain merge type");
  }
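
  // For illustration of mergeBlocks above: with X = [a, b, c, d], Y = [e, f],
  // and MergeOffset = 2, chain X is split into X1 = [a, b] and X2 = [c, d];
  // the merge types then produce the following orders:
  //   X_Y     -> a b c d e f
  //   X1_Y_X2 -> a b e f c d
  //   Y_X2_X1 -> e f c d a b
  //   X2_X1_Y -> c d a b e f
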
  /// Merge chain From into chain Into, update the list of active chains,
  /// adjacency information, and the corresponding cached values.
  void mergeChains(Chain *Into, Chain *From, size_t MergeOffset,
                   MergeTypeTy MergeType) {
    assert(Into != From && "a chain cannot be merged with itself");

    // Merge the blocks
    auto MergedBlocks =
        mergeBlocks(Into->blocks(), From->blocks(), MergeOffset, MergeType);
    Into->merge(From, MergedBlocks.getBlocks());
    Into->mergeEdges(From);
    From->clear();

    // Update cached ext-tsp score for the new chain
    auto SelfEdge = Into->getEdge(Into);
    if (SelfEdge != nullptr) {
      MergedBlocks = MergedChain(Into->blocks().begin(), Into->blocks().end());
      Into->setScore(extTSPScore(MergedBlocks, SelfEdge->jumps()));
    }

    // Remove chain From from the list of active chains
    auto Iter = std::remove(HotChains.begin(), HotChains.end(), From);
    HotChains.erase(Iter, HotChains.end());

    // Invalidate caches
    for (auto EdgeIter : Into->edges()) {
      EdgeIter.second->invalidateCache();
    }
  }

  /// Concatenate all chains into a final order of blocks.
  void concatChains(std::vector<uint64_t> &Order) {
    // Collect chains and calculate some stats for their sorting
    std::vector<Chain *> SortedChains;
    DenseMap<const Chain *, double> ChainDensity;
    for (auto &Chain : AllChains) {
      if (!Chain.blocks().empty()) {
        SortedChains.push_back(&Chain);
        // Using doubles to avoid overflow of ExecutionCount
        double Size = 0;
        double ExecutionCount = 0;
        for (auto Block : Chain.blocks()) {
          Size += static_cast<double>(Block->Size);
          ExecutionCount += static_cast<double>(Block->ExecutionCount);
        }
        assert(Size > 0 && "a chain of zero size");
        ChainDensity[&Chain] = ExecutionCount / Size;
      }
    }

    // Sort chains by density in decreasing order
    std::stable_sort(SortedChains.begin(), SortedChains.end(),
                     [&](const Chain *C1, const Chain *C2) {
                       // Make sure the original entry block is at the
                       // beginning of the order
                       if (C1->isEntry() != C2->isEntry()) {
                         return C1->isEntry();
                       }

                       const double D1 = ChainDensity[C1];
                       const double D2 = ChainDensity[C2];
                       // Compare by density and break ties by chain identifiers
                       return (D1 != D2) ? (D1 > D2) : (C1->id() < C2->id());
                     });

    // Collect the blocks in the order specified by their chains
    Order.reserve(NumNodes);
    for (auto Chain : SortedChains) {
      for (auto Block : Chain->blocks()) {
        Order.push_back(Block->Index);
      }
    }
  }

private:
  /// The number of nodes in the graph.
  const size_t NumNodes;

  /// Successors of each node.
  std::vector<std::vector<uint64_t>> SuccNodes;

  /// Predecessors of each node.
  std::vector<std::vector<uint64_t>> PredNodes;

  /// All basic blocks.
  std::vector<Block> AllBlocks;

  /// All jumps between blocks.
  std::vector<Jump> AllJumps;

  /// All chains of basic blocks.
  std::vector<Chain> AllChains;

  /// All edges between chains.
  std::vector<ChainEdge> AllEdges;

  /// Active chains. The vector gets updated at runtime when chains are merged.
  std::vector<Chain *> HotChains;
};

} // end of anonymous namespace
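
// An illustrative (hypothetical) use of the reordering entry point: given
// per-block sizes, per-block execution counts, and per-edge jump counts, the
// function returns a permutation of block indices with the entry block kept
// first. The values below are made up for the example.
//
//   std::vector<uint64_t> Sizes  = {16, 32, 48};
//   std::vector<uint64_t> Counts = {100, 90, 100};
//   DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> JumpCounts;
//   JumpCounts[{0, 1}] = 90;
//   JumpCounts[{0, 2}] = 10;
//   JumpCounts[{1, 2}] = 90;
//   std::vector<uint64_t> Order = applyExtTspLayout(Sizes, Counts, JumpCounts);
//   // Order.front() == 0; Order contains every block index exactly once.
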
std::vector<uint64_t> llvm::applyExtTspLayout(
    const std::vector<uint64_t> &NodeSizes,
    const std::vector<uint64_t> &NodeCounts,
    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
  size_t NumNodes = NodeSizes.size();

  // Verify correctness of the input data.
  assert(NodeCounts.size() == NodeSizes.size() && "Incorrect input");
  assert(NumNodes > 2 && "Incorrect input");

  // Apply the reordering algorithm.
  auto Alg = ExtTSPImpl(NumNodes, NodeSizes, NodeCounts, EdgeCounts);
  std::vector<uint64_t> Result;
  Alg.run(Result);

  // Verify correctness of the output.
  assert(Result.front() == 0 && "Original entry point is not preserved");
  assert(Result.size() == NumNodes && "Incorrect size of reordered layout");
  return Result;
}

double llvm::calcExtTspScore(
    const std::vector<uint64_t> &Order, const std::vector<uint64_t> &NodeSizes,
    const std::vector<uint64_t> &NodeCounts,
    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
  // Estimate addresses of the blocks in memory
  auto Addr = std::vector<uint64_t>(NodeSizes.size(), 0);
  for (size_t Idx = 1; Idx < Order.size(); Idx++) {
    Addr[Order[Idx]] = Addr[Order[Idx - 1]] + NodeSizes[Order[Idx - 1]];
  }

  // Increase the score for each jump
  double Score = 0;
  for (auto It : EdgeCounts) {
    auto Pred = It.first.first;
    auto Succ = It.first.second;
    uint64_t Count = It.second;
    Score += extTSPScore(Addr[Pred], NodeSizes[Pred], Addr[Succ], Count);
  }
  return Score;
}

double llvm::calcExtTspScore(
    const std::vector<uint64_t> &NodeSizes,
    const std::vector<uint64_t> &NodeCounts,
    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
  auto Order = std::vector<uint64_t>(NodeSizes.size());
  for (size_t Idx = 0; Idx < NodeSizes.size(); Idx++) {
    Order[Idx] = Idx;
  }
  return calcExtTspScore(Order, NodeSizes, NodeCounts, EdgeCounts);
}