//===- bolt/Passes/SplitFunctions.cpp - Pass for splitting function code --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SplitFunctions pass.
//
//===----------------------------------------------------------------------===//

#include "bolt/Passes/SplitFunctions.h"
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Core/FunctionLayout.h"
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormatVariadic.h"
#include <algorithm>
#include <iterator>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#define DEBUG_TYPE "bolt-opts"

using namespace llvm;
using namespace bolt;

namespace {
/// Option parser for the boolean -split-functions flag that also accepts the
/// legacy numeric values "2" and "3", mapping them to "true" while emitting a
/// deprecation warning.
class DeprecatedSplitFunctionOptionParser : public cl::parser<bool> {
public:
  explicit DeprecatedSplitFunctionOptionParser(cl::Option &O)
      : cl::parser<bool>(O) {}

  /// Returns false (success) for the deprecated values after forcing the
  /// option to true; otherwise defers to the standard bool parser.
  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, bool &Value) {
    if (Arg == "2" || Arg == "3") {
      Value = true;
      errs() << formatv("BOLT-WARNING: specifying non-boolean value \"{0}\" "
                        "for option -{1} is deprecated\n",
                        Arg, ArgName);
      return false;
    }
    return cl::parser<bool>::parse(O, ArgName, Arg, Value);
  }
};
} // namespace

namespace opts {

extern cl::OptionCategory BoltOptCategory;

extern cl::opt<bool> SplitEH;
extern cl::opt<unsigned> ExecutionCountThreshold;
extern cl::opt<uint32_t> RandomSeed;

static cl::opt<bool> AggressiveSplitting(
    "split-all-cold", cl::desc("outline as many cold basic blocks as possible"),
    cl::cat(BoltOptCategory));

static cl::opt<unsigned> SplitAlignThreshold(
    "split-align-threshold",
    cl::desc("when deciding to split a function, apply this alignment "
             "while doing the size comparison (see -split-threshold). "
             "Default value: 2."),
    cl::init(2),

    cl::Hidden, cl::cat(BoltOptCategory));

static cl::opt<bool, false, DeprecatedSplitFunctionOptionParser>
    SplitFunctions("split-functions",
                   cl::desc("split functions into fragments"),
                   cl::cat(BoltOptCategory));

static cl::opt<unsigned> SplitThreshold(
    "split-threshold",
    cl::desc("split function only if its main size is reduced by more than "
             "given amount of bytes. Default value: 0, i.e. split iff the "
             "size is reduced. Note that on some architectures the size can "
             "increase after splitting."),
    cl::init(0), cl::Hidden, cl::cat(BoltOptCategory));

static cl::opt<SplitFunctionsStrategy> SplitStrategy(
    "split-strategy", cl::init(SplitFunctionsStrategy::Profile2),
    cl::values(clEnumValN(SplitFunctionsStrategy::Profile2, "profile2",
                          "split each function into a hot and cold fragment "
                          "using profiling information")),
    cl::values(clEnumValN(SplitFunctionsStrategy::CDSplit, "cdsplit",
                          "split each function into a hot, warm, and cold "
                          "fragment using profiling information")),
    cl::values(clEnumValN(
        SplitFunctionsStrategy::Random2, "random2",
        "split each function into a hot and cold fragment at a randomly chosen "
        "split point (ignoring any available profiling information)")),
    cl::values(clEnumValN(
        SplitFunctionsStrategy::RandomN, "randomN",
        "split each function into N fragments at a randomly chosen split "
        "points (ignoring any available profiling information)")),
    cl::values(clEnumValN(
        SplitFunctionsStrategy::All, "all",
        "split all basic blocks of each function into fragments such that each "
        "fragment contains exactly a single basic block")),
    cl::desc("strategy used to partition blocks into fragments"),
    cl::cat(BoltOptCategory));
} // namespace opts

namespace {
/// Return true if every block in \p BF carries a profile count (i.e. none is
/// marked COUNT_NO_PROFILE).
bool hasFullProfile(const BinaryFunction &BF) {
  return llvm::all_of(BF.blocks(), [](const BinaryBasicBlock &BB) {
    return BB.getExecutionCount() != BinaryBasicBlock::COUNT_NO_PROFILE;
  });
}

/// Return true if every block in \p BF has a zero execution count.
bool allBlocksCold(const BinaryFunction &BF) {
  return llvm::all_of(BF.blocks(), [](const BinaryBasicBlock &BB) {
    return BB.getExecutionCount() == 0;
  });
}

/// Hot/cold splitting strategy: moves every zero-count block into the cold
/// fragment, keeping all executed blocks in the main fragment.
struct SplitProfile2 final : public SplitStrategy {
  bool canSplit(const BinaryFunction &BF) override {
    // Require a complete, non-trivial profile: splitting on partial data
    // would misclassify blocks.
    return BF.hasValidProfile() && hasFullProfile(BF) && !allBlocksCold(BF);
  }

  bool compactFragments() override { return true; }

  void fragment(const BlockIt Start, const BlockIt End) override {
    for (BinaryBasicBlock *const BB : llvm::make_range(Start, End)) {
      if (BB->getExecutionCount() == 0)
        BB->setFragmentNum(FragmentNum::cold());
    }
  }
};

/// Hot/warm/cold splitting strategy used for the second (post-reordering)
/// CDSplit pass: blocks up to a chosen split index stay hot; the rest become
/// warm (executed) or cold (zero-count).
struct SplitCacheDirected final : public SplitStrategy {
  using BasicBlockOrder = BinaryFunction::BasicBlockOrderType;

  bool canSplit(const BinaryFunction &BF) override {
    return BF.hasValidProfile() && hasFullProfile(BF) && !allBlocksCold(BF);
  }

  // When some functions are hot-warm split and others are hot-warm-cold split,
  // we do not want to change the fragment numbers of the blocks in the hot-warm
  // split functions.
  bool compactFragments() override { return false; }

  void fragment(const BlockIt Start, const BlockIt End) override {
    BasicBlockOrder BlockOrder(Start, End);
    BinaryFunction &BF = *BlockOrder.front()->getFunction();

    size_t BestSplitIndex = findSplitIndex(BF, BlockOrder);

    // Assign fragments based on the computed best split index.
    // All basic blocks with index up to the best split index become hot.
    // All remaining blocks are warm / cold depending on if count is
    // greater than zero or not.
    for (size_t Index = 0; Index < BlockOrder.size(); Index++) {
      BinaryBasicBlock *BB = BlockOrder[Index];
      if (Index <= BestSplitIndex)
        BB->setFragmentNum(FragmentNum::main());
      else
        BB->setFragmentNum(BB->getKnownExecutionCount() > 0
                               ? FragmentNum::warm()
                               : FragmentNum::cold());
    }
  }

private:
  /// Find the best index for splitting. The returned value is the index of the
  /// last hot basic block. Hence, "no splitting" is equivalent to returning the
  /// value which is one less than the size of the function.
  size_t findSplitIndex(const BinaryFunction &BF,
                        const BasicBlockOrder &BlockOrder) {
    // Placeholder: hot-warm split after entry block.
    // NOTE(review): returning 0 keeps only BlockOrder[0] in the main fragment;
    // a cost-model-based implementation is presumably intended here.
    return 0;
  }
};

/// Random hot/cold splitting: picks a single split point uniformly at random
/// and moves every block from that point on into the cold fragment. Used for
/// testing; ignores profile data entirely.
struct SplitRandom2 final : public SplitStrategy {
  std::minstd_rand0 Gen;

  // Seeded from -bolt-seed so runs are reproducible for the same input.
  SplitRandom2() : Gen(opts::RandomSeed.getValue()) {}

  bool canSplit(const BinaryFunction &BF) override { return true; }

  bool compactFragments() override { return true; }

  void fragment(const BlockIt Start, const BlockIt End) override {
    using DiffT = typename std::iterator_traits<BlockIt>::difference_type;
    const DiffT NumBlocks = End - Start;
    assert(NumBlocks > 0 && "Cannot fragment empty function");

    // We want to split at least one block
    const auto LastSplitPoint = std::max<DiffT>(NumBlocks - 1, 1);
    std::uniform_int_distribution<DiffT> Dist(1, LastSplitPoint);
    const DiffT SplitPoint = Dist(Gen);
    for (BinaryBasicBlock *BB : llvm::make_range(Start + SplitPoint, End))
      BB->setFragmentNum(FragmentNum::cold());

    LLVM_DEBUG(dbgs() << formatv("BOLT-DEBUG: randomly chose last {0} (out of "
                                 "{1} possible) blocks to split\n",
                                 NumBlocks - SplitPoint, End - Start));
  }
};

/// Random N-way splitting: chooses a random number of split points at random
/// positions, producing up to N fragments. Used for testing; ignores profile
/// data entirely.
struct SplitRandomN final : public SplitStrategy {
  std::minstd_rand0 Gen;

  // Seeded from -bolt-seed so runs are reproducible for the same input.
  SplitRandomN() : Gen(opts::RandomSeed.getValue()) {}

  bool canSplit(const BinaryFunction &BF) override { return true; }

  bool compactFragments() override { return true; }

  void fragment(const BlockIt Start, const BlockIt End) override {
    using DiffT = typename std::iterator_traits<BlockIt>::difference_type;
    const DiffT NumBlocks = End - Start;
    assert(NumBlocks > 0 && "Cannot fragment empty function");

    // With n blocks, there are n-1 places to split them.
    const DiffT MaximumSplits = NumBlocks - 1;
    // We want to generate at least two fragments if possible, but if there is
    // only one block, no splits are possible.
    const auto MinimumSplits = std::min<DiffT>(MaximumSplits, 1);
    std::uniform_int_distribution<DiffT> Dist(MinimumSplits, MaximumSplits);
    // Choose how many splits to perform
    const DiffT NumSplits = Dist(Gen);

    // Draw split points from a lottery
    SmallVector<unsigned, 0> Lottery(MaximumSplits);
    // Start lottery at 1, because there is no meaningful splitpoint before the
    // first block.
    std::iota(Lottery.begin(), Lottery.end(), 1u);
    std::shuffle(Lottery.begin(), Lottery.end(), Gen);
    Lottery.resize(NumSplits);
    llvm::sort(Lottery);

    // Add one past the end entry to lottery
    Lottery.push_back(NumBlocks);

    unsigned LotteryIndex = 0;
    unsigned BBPos = 0;
    for (BinaryBasicBlock *const BB : make_range(Start, End)) {
      // Check whether to start new fragment
      if (BBPos >= Lottery[LotteryIndex])
        ++LotteryIndex;

      // Because LotteryIndex is 0 based and cold fragments are 1 based, we can
      // use the index to assign fragments.
      BB->setFragmentNum(FragmentNum(LotteryIndex));

      ++BBPos;
    }
  }
};

/// Degenerate splitting strategy that puts every basic block into its own
/// fragment. Used for testing.
struct SplitAll final : public SplitStrategy {
  bool canSplit(const BinaryFunction &BF) override { return true; }

  bool compactFragments() override {
    // Keeping empty fragments allows us to test that empty fragments do not
    // generate symbols.
    return false;
  }

  void fragment(const BlockIt Start, const BlockIt End) override {
    unsigned Fragment = 0;
    for (BinaryBasicBlock *const BB : llvm::make_range(Start, End))
      BB->setFragmentNum(FragmentNum(Fragment++));
  }
};
} // namespace

namespace llvm {
namespace bolt {

bool SplitFunctions::shouldOptimize(const BinaryFunction &BF) const {
  // Apply execution count threshold
  if (BF.getKnownExecutionCount() < opts::ExecutionCountThreshold)
    return false;

  return BinaryFunctionPass::shouldOptimize(BF);
}

void SplitFunctions::runOnFunctions(BinaryContext &BC) {
  if (!opts::SplitFunctions)
    return;

  // If split strategy is not CDSplit, then a second run of the pass is not
  // needed after function reordering.
  if (BC.HasFinalizedFunctionOrder &&
      opts::SplitStrategy != SplitFunctionsStrategy::CDSplit)
    return;

  std::unique_ptr<SplitStrategy> Strategy;
  bool ForceSequential = false;

  switch (opts::SplitStrategy) {
  case SplitFunctionsStrategy::CDSplit:
    // CDSplit runs two splitting passes: hot-cold splitting (SplitProfile2)
    // before function reordering and hot-warm-cold splitting
    // (SplitCacheDirected) after function reordering.
    if (BC.HasFinalizedFunctionOrder)
      Strategy = std::make_unique<SplitCacheDirected>();
    else
      Strategy = std::make_unique<SplitProfile2>();
    opts::AggressiveSplitting = true;
    BC.HasWarmSection = true;
    break;
  case SplitFunctionsStrategy::Profile2:
    Strategy = std::make_unique<SplitProfile2>();
    break;
  case SplitFunctionsStrategy::Random2:
    Strategy = std::make_unique<SplitRandom2>();
    // If we split functions randomly, we need to ensure that across runs with
    // the same input, we generate random numbers for each function in the same
    // order.
    ForceSequential = true;
    break;
  case SplitFunctionsStrategy::RandomN:
    Strategy = std::make_unique<SplitRandomN>();
    ForceSequential = true;
    break;
  case SplitFunctionsStrategy::All:
    Strategy = std::make_unique<SplitAll>();
    break;
  }

  ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
    return !shouldOptimize(BF);
  };

  ParallelUtilities::runOnEachFunction(
      BC, ParallelUtilities::SchedulingPolicy::SP_BB_LINEAR,
      [&](BinaryFunction &BF) { splitFunction(BF, *Strategy); }, SkipFunc,
      "SplitFunctions", ForceSequential);

  if (SplitBytesHot + SplitBytesCold > 0)
    outs() << "BOLT-INFO: splitting separates " << SplitBytesHot
           << " hot bytes from " << SplitBytesCold << " cold bytes "
           << format("(%.2lf%% of split functions is hot).\n",
                     100.0 * SplitBytesHot / (SplitBytesHot + SplitBytesCold));
}

void SplitFunctions::splitFunction(BinaryFunction &BF, SplitStrategy &S) {
  if (BF.empty())
    return;

  if (!S.canSplit(BF))
    return;

  // Remember the pre-split layout so the split can be reverted if it does not
  // shrink the main fragment enough (see the size check below).
  FunctionLayout &Layout = BF.getLayout();
  BinaryFunction::BasicBlockOrderType PreSplitLayout(Layout.block_begin(),
                                                     Layout.block_end());

  BinaryContext &BC = BF.getBinaryContext();
  size_t OriginalHotSize;
  size_t HotSize;
  size_t ColdSize;
  // Size estimation is only performed on x86; on other targets the split is
  // kept unconditionally.
  if (BC.isX86()) {
    std::tie(OriginalHotSize, ColdSize) = BC.calculateEmittedSize(BF);
    LLVM_DEBUG(dbgs() << "Estimated size for function " << BF
                      << " pre-split is <0x"
                      << Twine::utohexstr(OriginalHotSize) << ", 0x"
                      << Twine::utohexstr(ColdSize) << ">\n");
  }

  BinaryFunction::BasicBlockOrderType NewLayout(Layout.block_begin(),
                                                Layout.block_end());
  // Never outline the first basic block.
  NewLayout.front()->setCanOutline(false);
  for (BinaryBasicBlock *const BB : NewLayout) {
    if (!BB->canOutline())
      continue;

    // Do not split extra entry points in aarch64. They can be referred by
    // using ADRs and when this happens, these blocks cannot be placed far
    // away due to the limited range in ADR instruction.
    if (BC.isAArch64() && BB->isEntryPoint()) {
      BB->setCanOutline(false);
      continue;
    }

    if (BF.hasEHRanges() && !opts::SplitEH) {
      // We cannot move landing pads (or rather entry points for landing pads).
      if (BB->isLandingPad()) {
        BB->setCanOutline(false);
        continue;
      }
      // We cannot move a block that can throw since exception-handling
      // runtime cannot deal with split functions. However, if we can guarantee
      // that the block never throws, it is safe to move the block to
      // decrease the size of the function.
      for (MCInst &Instr : *BB) {
        if (BC.MIB->isInvoke(Instr)) {
          BB->setCanOutline(false);
          break;
        }
      }
    }
  }

  BF.getLayout().updateLayoutIndices();
  // Let the strategy assign a fragment number to each block.
  S.fragment(NewLayout.begin(), NewLayout.end());

  // Make sure all non-outlineable blocks are in the main-fragment.
  for (BinaryBasicBlock *const BB : NewLayout) {
    if (!BB->canOutline())
      BB->setFragmentNum(FragmentNum::main());
  }

  if (opts::AggressiveSplitting) {
    // All blocks with 0 count that we can move go to the end of the function.
    // Even if they were natural to cluster formation and were seen in-between
    // hot basic blocks.
    llvm::stable_sort(NewLayout, [&](const BinaryBasicBlock *const A,
                                     const BinaryBasicBlock *const B) {
      return A->getFragmentNum() < B->getFragmentNum();
    });
  } else if (BF.hasEHRanges() && !opts::SplitEH) {
    // Typically functions with exception handling have landing pads at the end.
    // We cannot move beginning of landing pads, but we can move 0-count blocks
    // comprising landing pads to the end and thus facilitate splitting.
    auto FirstLP = NewLayout.begin();
    while ((*FirstLP)->isLandingPad())
      ++FirstLP;

    std::stable_sort(FirstLP, NewLayout.end(),
                     [&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
                       return A->getFragmentNum() < B->getFragmentNum();
                     });
  }

  // Make sure that fragments are increasing: walking the layout backwards,
  // clamp any block whose fragment number exceeds that of a later block so
  // fragment numbers never decrease along the layout.
  FragmentNum CurrentFragment = NewLayout.back()->getFragmentNum();
  for (BinaryBasicBlock *const BB : reverse(NewLayout)) {
    if (BB->getFragmentNum() > CurrentFragment)
      BB->setFragmentNum(CurrentFragment);
    CurrentFragment = BB->getFragmentNum();
  }

  if (S.compactFragments()) {
    // Renumber fragments consecutively starting from main, eliminating gaps
    // left by the strategy's assignment.
    FragmentNum CurrentFragment = FragmentNum::main();
    FragmentNum NewFragment = FragmentNum::main();
    for (BinaryBasicBlock *const BB : NewLayout) {
      if (BB->getFragmentNum() > CurrentFragment) {
        CurrentFragment = BB->getFragmentNum();
        NewFragment = FragmentNum(NewFragment.get() + 1);
      }
      BB->setFragmentNum(NewFragment);
    }
  }

  const bool LayoutUpdated = BF.getLayout().update(NewLayout);

  // For shared objects, invoke instructions and corresponding landing pads
  // have to be placed in the same fragment. When we split them, create
  // trampoline landing pads that will redirect the execution to real LPs.
  TrampolineSetType Trampolines;
  if (!BC.HasFixedLoadAddress && BF.hasEHRanges() && BF.isSplit())
    Trampolines = createEHTrampolines(BF);

  // Check the new size to see if it's worth splitting the function.
  if (BC.isX86() && LayoutUpdated) {
    std::tie(HotSize, ColdSize) = BC.calculateEmittedSize(BF);
    LLVM_DEBUG(dbgs() << "Estimated size for function " << BF
                      << " post-split is <0x" << Twine::utohexstr(HotSize)
                      << ", 0x" << Twine::utohexstr(ColdSize) << ">\n");
    if (alignTo(OriginalHotSize, opts::SplitAlignThreshold) <=
        alignTo(HotSize, opts::SplitAlignThreshold) + opts::SplitThreshold) {
      if (opts::Verbosity >= 2) {
        outs() << "BOLT-INFO: Reversing splitting of function "
               << formatv("{0}:\n  {1:x}, {2:x} -> {3:x}\n", BF, HotSize,
                          ColdSize, OriginalHotSize);
      }

      // Reverse the action of createEHTrampolines(). The trampolines will be
      // placed immediately before the matching destination resulting in no
      // extra code.
      if (PreSplitLayout.size() != BF.size())
        PreSplitLayout = mergeEHTrampolines(BF, PreSplitLayout, Trampolines);

      for (BinaryBasicBlock &BB : BF)
        BB.setFragmentNum(FragmentNum::main());
      BF.getLayout().update(PreSplitLayout);
    } else {
      SplitBytesHot += HotSize;
      SplitBytesCold += ColdSize;
    }
  }

  // Fix branches if the splitting decision of the pass after function
  // reordering is different from that of the pass before function reordering.
  if (LayoutUpdated && BC.HasFinalizedFunctionOrder)
    BF.fixBranches();
}

SplitFunctions::TrampolineSetType
SplitFunctions::createEHTrampolines(BinaryFunction &BF) const {
  const auto &MIB = BF.getBinaryContext().MIB;

  // Map real landing pads to the corresponding trampolines.
  TrampolineSetType LPTrampolines;

  // Iterate over the copy of basic blocks since we are adding new blocks to the
  // function which will invalidate its iterators.
  std::vector<BinaryBasicBlock *> Blocks(BF.pbegin(), BF.pend());
  for (BinaryBasicBlock *BB : Blocks) {
    for (MCInst &Instr : *BB) {
      const std::optional<MCPlus::MCLandingPad> EHInfo = MIB->getEHInfo(Instr);
      if (!EHInfo || !EHInfo->first)
        continue;

      // Only throwers whose landing pad ended up in a different fragment need
      // a trampoline.
      const MCSymbol *LPLabel = EHInfo->first;
      BinaryBasicBlock *LPBlock = BF.getBasicBlockForLabel(LPLabel);
      if (BB->getFragmentNum() == LPBlock->getFragmentNum())
        continue;

      // Reuse an existing trampoline for this (fragment, landing pad) pair.
      const MCSymbol *TrampolineLabel = nullptr;
      const TrampolineKey Key(BB->getFragmentNum(), LPLabel);
      auto Iter = LPTrampolines.find(Key);
      if (Iter != LPTrampolines.end()) {
        TrampolineLabel = Iter->second;
      } else {
        // Create a trampoline basic block in the same fragment as the thrower.
        // Note: there's no need to insert the jump instruction, it will be
        // added by fixBranches().
        BinaryBasicBlock *TrampolineBB = BF.addBasicBlock();
        TrampolineBB->setFragmentNum(BB->getFragmentNum());
        TrampolineBB->setExecutionCount(LPBlock->getExecutionCount());
        TrampolineBB->addSuccessor(LPBlock, TrampolineBB->getExecutionCount());
        TrampolineBB->setCFIState(LPBlock->getCFIState());
        TrampolineLabel = TrampolineBB->getLabel();
        LPTrampolines.insert(std::make_pair(Key, TrampolineLabel));
      }

      // Substitute the landing pad with the trampoline.
      MIB->updateEHInfo(Instr,
                        MCPlus::MCLandingPad(TrampolineLabel, EHInfo->second));
    }
  }

  if (LPTrampolines.empty())
    return LPTrampolines;

  // All trampoline blocks were added to the end of the function. Place them at
  // the end of corresponding fragments.
  BinaryFunction::BasicBlockOrderType NewLayout(BF.getLayout().block_begin(),
                                                BF.getLayout().block_end());
  stable_sort(NewLayout, [&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
    return A->getFragmentNum() < B->getFragmentNum();
  });
  BF.getLayout().update(NewLayout);

  // Conservatively introduce branch instructions.
  BF.fixBranches();

  // Update exception-handling CFG for the function.
  BF.recomputeLandingPads();

  return LPTrampolines;
}

SplitFunctions::BasicBlockOrderType SplitFunctions::mergeEHTrampolines(
    BinaryFunction &BF, SplitFunctions::BasicBlockOrderType &Layout,
    const SplitFunctions::TrampolineSetType &Trampolines) const {
  // Group trampolines by the landing pad they target so each trampoline can
  // be re-inserted immediately before its destination block.
  DenseMap<const MCSymbol *, SmallVector<const MCSymbol *, 0>>
      IncomingTrampolines;
  for (const auto &Entry : Trampolines) {
    IncomingTrampolines[Entry.getFirst().Target].emplace_back(
        Entry.getSecond());
  }

  BasicBlockOrderType MergedLayout;
  for (BinaryBasicBlock *BB : Layout) {
    auto Iter = IncomingTrampolines.find(BB->getLabel());
    if (Iter != IncomingTrampolines.end()) {
      for (const MCSymbol *const Trampoline : Iter->getSecond()) {
        BinaryBasicBlock *LPBlock = BF.getBasicBlockForLabel(Trampoline);
        assert(LPBlock && "Could not find matching landing pad block.");
        MergedLayout.push_back(LPBlock);
      }
    }
    MergedLayout.push_back(BB);
  }

  return MergedLayout;
}

} // namespace bolt
} // namespace llvm