//===- CodeExtractor.cpp - Pull code region into a new function ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface to tear out a code region, such as an
// individual loop or a parallel section, into a new function, replacing it
// with a call to the new function.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

#define DEBUG_TYPE "code-extractor"

// Provide a command-line option to aggregate function arguments into a struct
// for functions produced by the code extractor. This is useful when converting
// extracted functions to pthread-based code, as only one argument (void*) can
// be passed in to pthread_create().
static cl::opt<bool>
AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
                 cl::desc("Aggregate arguments to code-extracted functions"));

/// Test whether a block is valid for extraction.
static bool isBlockValidForExtraction(const BasicBlock &BB,
                                      const SetVector<BasicBlock *> &Result,
                                      bool AllowVarArgs, bool AllowAlloca) {
  // Taking the address of a basic block moved to another function is illegal.
  if (BB.hasAddressTaken())
    return false;

  // Don't hoist code that uses another basic block's address, as it's likely
  // to lead to unexpected behavior, like cross-function jumps.
  SmallPtrSet<User const *, 16> Visited;
  SmallVector<User const *, 16> ToVisit;

  for (Instruction const &Inst : BB)
    ToVisit.push_back(&Inst);

  while (!ToVisit.empty()) {
    User const *Curr = ToVisit.pop_back_val();
    if (!Visited.insert(Curr).second)
      continue;
    if (isa<BlockAddress const>(Curr))
      return false; // Even a reference to self is likely to be incompatible.

    if (isa<Instruction>(Curr) && cast<Instruction>(Curr)->getParent() != &BB)
      continue;

    for (auto const &U : Curr->operands()) {
      if (auto *UU = dyn_cast<User>(U))
        ToVisit.push_back(UU);
    }
  }

  // If explicitly requested, allow vastart and alloca. For invoke instructions
  // verify that extraction is valid.
  for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
    if (isa<AllocaInst>(I)) {
      if (!AllowAlloca)
        return false;
      continue;
    }

    if (const auto *II = dyn_cast<InvokeInst>(I)) {
      // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
      // must be a part of the subgraph which is being extracted.
      if (auto *UBB = II->getUnwindDest())
        if (!Result.count(UBB))
          return false;
      continue;
    }

    // All catch handlers of a catchswitch instruction as well as the unwind
    // destination must be in the subgraph.
    if (const auto *CSI = dyn_cast<CatchSwitchInst>(I)) {
      if (auto *UBB = CSI->getUnwindDest())
        if (!Result.count(UBB))
          return false;
      for (auto *HBB : CSI->handlers())
        if (!Result.count(const_cast<BasicBlock*>(HBB)))
          return false;
      continue;
    }

    // Make sure that the entire catch handler is within the subgraph. It is
    // sufficient to check that the catch return's block is in the list.
    if (const auto *CPI = dyn_cast<CatchPadInst>(I)) {
      for (const auto *U : CPI->users())
        if (const auto *CRI = dyn_cast<CatchReturnInst>(U))
          if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
            return false;
      continue;
    }

    // Do similar checks for a cleanup handler - the entire handler must be in
    // the subgraph which is going to be extracted. A cleanup return should
    // additionally check that its unwind destination is also in the subgraph.
    if (const auto *CPI = dyn_cast<CleanupPadInst>(I)) {
      for (const auto *U : CPI->users())
        if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
          if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
            return false;
      continue;
    }
    if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) {
      if (auto *UBB = CRI->getUnwindDest())
        if (!Result.count(UBB))
          return false;
      continue;
    }

    if (const CallInst *CI = dyn_cast<CallInst>(I))
      if (const Function *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::vastart) {
          if (AllowVarArgs)
            continue;
          else
            return false;
        }
  }

  return true;
}

/// Build a set of blocks to extract if the input blocks are viable.
static SetVector<BasicBlock *>
buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                        bool AllowVarArgs, bool AllowAlloca) {
  assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
  SetVector<BasicBlock *> Result;

  // Loop over the blocks, adding them to our set-vector, and aborting with an
  // empty set if we encounter invalid blocks.
  for (BasicBlock *BB : BBs) {
    // If this block is dead, don't process it.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    if (!Result.insert(BB))
      llvm_unreachable("Repeated basic blocks in extraction input");
  }

  for (auto *BB : Result) {
    if (!isBlockValidForExtraction(*BB, Result, AllowVarArgs, AllowAlloca))
      return {};

    // Make sure that the first block is not a landing pad.
    if (BB == Result.front()) {
      if (BB->isEHPad()) {
        LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
        return {};
      }
      continue;
    }

    // All blocks other than the first must not have predecessors outside of
    // the subgraph which is being extracted.
    for (auto *PBB : predecessors(BB))
      if (!Result.count(PBB)) {
        LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "
                             "outside the region except for the first block!\n");
        return {};
      }
  }

  return Result;
}

CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                             bool AggregateArgs, BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, bool AllowVarArgs,
                             bool AllowAlloca)
    : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AllowVarArgs(AllowVarArgs),
      Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)) {}

CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
                             BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI)
    : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AllowVarArgs(false),
      Blocks(buildExtractionBlockSet(L.getBlocks(), &DT,
                                     /* AllowVarArgs */ false,
                                     /* AllowAlloca */ false)) {}

/// definedInRegion - Return true if the specified value is defined in the
/// extracted region.
static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (Blocks.count(I->getParent()))
      return true;
  return false;
}

/// definedInCaller - Return true if the specified value is defined in the
/// function being code extracted, but not in the region being extracted.
/// These values must be passed in as live-ins to the function.
static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
  if (isa<Argument>(V)) return true;
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (!Blocks.count(I->getParent()))
      return true;
  return false;
}

static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
  BasicBlock *CommonExitBlock = nullptr;
  auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
    for (auto *Succ : successors(Block)) {
      // Internal edges, ok.
      if (Blocks.count(Succ))
        continue;
      if (!CommonExitBlock) {
        CommonExitBlock = Succ;
        continue;
      }
      if (CommonExitBlock == Succ)
        continue;

      return true;
    }
    return false;
  };

  if (any_of(Blocks, hasNonCommonExitSucc))
    return nullptr;

  return CommonExitBlock;
}

bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // Global variables cannot be aliased with locals.
        if (dyn_cast<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!dyn_cast<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all other cases conservatively if the instruction may have
        // side effects.
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }

  return true;
}

BasicBlock *
CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
  BasicBlock *SinglePredFromOutlineRegion = nullptr;
  assert(!Blocks.count(CommonExitBlock) &&
         "Expect a block outside the region!");
  for (auto *Pred : predecessors(CommonExitBlock)) {
    if (!Blocks.count(Pred))
      continue;
    if (!SinglePredFromOutlineRegion) {
      SinglePredFromOutlineRegion = Pred;
    } else if (SinglePredFromOutlineRegion != Pred) {
      SinglePredFromOutlineRegion = nullptr;
      break;
    }
  }

  if (SinglePredFromOutlineRegion)
    return SinglePredFromOutlineRegion;

#ifndef NDEBUG
  auto getFirstPHI = [](BasicBlock *BB) {
    BasicBlock::iterator I = BB->begin();
    PHINode *FirstPhi = nullptr;
    while (I != BB->end()) {
      PHINode *Phi = dyn_cast<PHINode>(I);
      if (!Phi)
        break;
      if (!FirstPhi) {
        FirstPhi = Phi;
        break;
      }
    }
    return FirstPhi;
  };
  // If there are any phi nodes, the single pred either exists or has already
  // been created before code extraction.
  assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
#endif

  BasicBlock *NewExitBlock = CommonExitBlock->splitBasicBlock(
      CommonExitBlock->getFirstNonPHI()->getIterator());

  for (auto PI = pred_begin(CommonExitBlock), PE = pred_end(CommonExitBlock);
       PI != PE;) {
    BasicBlock *Pred = *PI++;
    if (Blocks.count(Pred))
      continue;
    Pred->getTerminator()->replaceUsesOfWith(CommonExitBlock, NewExitBlock);
  }
  // Now add the old exit block to the outline region.
  Blocks.insert(CommonExitBlock);
  return CommonExitBlock;
}

void CodeExtractor::findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
                                BasicBlock *&ExitBlock) const {
  Function *Func = (*Blocks.begin())->getParent();
  ExitBlock = getCommonExitBlock(Blocks);

  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      auto *AI = dyn_cast<AllocaInst>(&II);
      if (!AI)
        continue;

      // Find the pair of lifetime markers for address 'Addr' that are either
      // defined inside the outline region or can legally be shrinkwrapped into
      // the outline region. If there are no other untracked uses of the
      // address, return the pair of markers if found; otherwise return a pair
      // of nullptr.
      auto GetLifeTimeMarkers =
          [&](Instruction *Addr, bool &SinkLifeStart,
              bool &HoistLifeEnd) -> std::pair<Instruction *, Instruction *> {
        Instruction *LifeStart = nullptr, *LifeEnd = nullptr;

        for (User *U : Addr->users()) {
          IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(U);
          if (IntrInst) {
            if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
              // Do not handle the case where AI has multiple start markers.
              if (LifeStart)
                return std::make_pair<Instruction *>(nullptr, nullptr);
              LifeStart = IntrInst;
            }
            if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
              if (LifeEnd)
                return std::make_pair<Instruction *>(nullptr, nullptr);
              LifeEnd = IntrInst;
            }
            continue;
          }
          // If we find an untracked use of the address, bail.
          if (!definedInRegion(Blocks, U))
            return std::make_pair<Instruction *>(nullptr, nullptr);
        }

        if (!LifeStart || !LifeEnd)
          return std::make_pair<Instruction *>(nullptr, nullptr);

        SinkLifeStart = !definedInRegion(Blocks, LifeStart);
        HoistLifeEnd = !definedInRegion(Blocks, LifeEnd);
        // Do the legality check.
        if ((SinkLifeStart || HoistLifeEnd) &&
            !isLegalToShrinkwrapLifetimeMarkers(Addr))
          return std::make_pair<Instruction *>(nullptr, nullptr);

        // Check to see if we have a place to do hoisting; if not, bail.
        if (HoistLifeEnd && !ExitBlock)
          return std::make_pair<Instruction *>(nullptr, nullptr);

        return std::make_pair(LifeStart, LifeEnd);
      };

      bool SinkLifeStart = false, HoistLifeEnd = false;
      auto Markers = GetLifeTimeMarkers(AI, SinkLifeStart, HoistLifeEnd);

      if (Markers.first) {
        if (SinkLifeStart)
          SinkCands.insert(Markers.first);
        SinkCands.insert(AI);
        if (HoistLifeEnd)
          HoistCands.insert(Markers.second);
        continue;
      }

      // Follow the bitcast.
      Instruction *MarkerAddr = nullptr;
      for (User *U : AI->users()) {
        if (U->stripInBoundsConstantOffsets() == AI) {
          SinkLifeStart = false;
          HoistLifeEnd = false;
          Instruction *Bitcast = cast<Instruction>(U);
          Markers = GetLifeTimeMarkers(Bitcast, SinkLifeStart, HoistLifeEnd);
          if (Markers.first) {
            MarkerAddr = Bitcast;
            continue;
          }
        }

        // Found an unknown use of AI.
        if (!definedInRegion(Blocks, U)) {
          MarkerAddr = nullptr;
          break;
        }
      }

      if (MarkerAddr) {
        if (SinkLifeStart)
          SinkCands.insert(Markers.first);
        if (!definedInRegion(Blocks, MarkerAddr))
          SinkCands.insert(MarkerAddr);
        SinkCands.insert(AI);
        if (HoistLifeEnd)
          HoistCands.insert(Markers.second);
      }
    }
  }
}

void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
                                      const ValueSet &SinkCands) const {
  for (BasicBlock *BB : Blocks) {
    // If a used value is defined outside the region, it's an input. If an
    // instruction is used outside the region, it's an output.
    for (Instruction &II : *BB) {
      for (User::op_iterator OI = II.op_begin(), OE = II.op_end(); OI != OE;
           ++OI) {
        Value *V = *OI;
        if (!SinkCands.count(V) && definedInCaller(Blocks, V))
          Inputs.insert(V);
      }

      for (User *U : II.users())
        if (!definedInRegion(Blocks, U)) {
          Outputs.insert(&II);
          break;
        }
    }
  }
}

/// severSplitPHINodes - If a PHI node has multiple inputs from outside of the
/// region, we need to split the entry block of the region so that the PHI node
/// is easier to deal with.
void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
  unsigned NumPredsFromRegion = 0;
  unsigned NumPredsOutsideRegion = 0;

  if (Header != &Header->getParent()->getEntryBlock()) {
    PHINode *PN = dyn_cast<PHINode>(Header->begin());
    if (!PN) return; // No PHI nodes.

    // If the header node contains any PHI nodes, check to see if there is more
    // than one entry from outside the region. If so, we need to sever the
    // header block into two.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(PN->getIncomingBlock(i)))
        ++NumPredsFromRegion;
      else
        ++NumPredsOutsideRegion;

    // If there is one (or fewer) predecessor from outside the region, we don't
    // need to do anything special.
    if (NumPredsOutsideRegion <= 1) return;
  }

  // Otherwise, we need to split the header block into two pieces: one
  // containing PHI nodes merging values from outside of the region, and a
  // second that contains all of the code for the block and merges back any
  // incoming values from inside of the region.
  BasicBlock *NewBB = SplitBlock(Header, Header->getFirstNonPHI(), DT);

  // We only want to code extract the second block now, and it becomes the new
  // header of the region.
  BasicBlock *OldPred = Header;
  Blocks.remove(OldPred);
  Blocks.insert(NewBB);
  Header = NewBB;

  // Okay, now we need to adjust the PHI nodes and any branches from within the
  // region to go to the new header block instead of the old header block.
  if (NumPredsFromRegion) {
    PHINode *PN = cast<PHINode>(OldPred->begin());
    // Loop over all of the predecessors of OldPred that are in the region,
    // changing them to branch to NewBB instead.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(PN->getIncomingBlock(i))) {
        Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
        TI->replaceUsesOfWith(OldPred, NewBB);
      }

    // Okay, everything within the region is now branching to the right block,
    // we just have to update the PHI nodes now, inserting PHI nodes into
    // NewBB.
    BasicBlock::iterator AfterPHIs;
    for (AfterPHIs = OldPred->begin(); isa<PHINode>(AfterPHIs); ++AfterPHIs) {
      PHINode *PN = cast<PHINode>(AfterPHIs);
      // Create a new PHI node in the new region, which has an incoming value
      // from OldPred of PN.
      PHINode *NewPN = PHINode::Create(PN->getType(), 1 + NumPredsFromRegion,
                                       PN->getName() + ".ce", &NewBB->front());
      PN->replaceAllUsesWith(NewPN);
      NewPN->addIncoming(PN, OldPred);

      // Loop over all of the incoming values in PN, moving them to NewPN if
      // they are from the extracted region.
      for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
        if (Blocks.count(PN->getIncomingBlock(i))) {
          NewPN->addIncoming(PN->getIncomingValue(i), PN->getIncomingBlock(i));
          PN->removeIncomingValue(i);
          --i;
        }
      }
    }
  }
}

void CodeExtractor::splitReturnBlocks() {
  for (BasicBlock *Block : Blocks)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(Block->getTerminator())) {
      BasicBlock *New =
          Block->splitBasicBlock(RI->getIterator(), Block->getName() + ".ret");
      if (DT) {
        // Old dominates New. New node dominates all other nodes dominated
        // by Old.
        DomTreeNode *OldNode = DT->getNode(Block);
        SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
                                               OldNode->end());

        DomTreeNode *NewNode = DT->addNewBlock(New, Block);

        for (DomTreeNode *I : Children)
          DT->changeImmediateDominator(I, NewNode);
      }
    }
}

/// constructFunction - make a function based on inputs and outputs, as
/// follows: f(in0, ..., inN, out0, ..., outN)
Function *CodeExtractor::constructFunction(const ValueSet &inputs,
                                           const ValueSet &outputs,
                                           BasicBlock *header,
                                           BasicBlock *newRootNode,
                                           BasicBlock *newHeader,
                                           Function *oldFunction,
                                           Module *M) {
  LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
  LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");

  // The new function returns an unsigned value; outputs are passed back by
  // reference.
  switch (NumExitBlocks) {
  case 0:
  case 1: RetTy = Type::getVoidTy(header->getContext()); break;
  case 2: RetTy = Type::getInt1Ty(header->getContext()); break;
  default: RetTy = Type::getInt16Ty(header->getContext()); break;
  }

  std::vector<Type *> paramTy;

  // Add the types of the input values to the function's argument list.
  for (Value *value : inputs) {
    LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
    paramTy.push_back(value->getType());
  }

  // Add the types of the output values to the function's argument list.
  for (Value *output : outputs) {
    LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
    if (AggregateArgs)
      paramTy.push_back(output->getType());
    else
      paramTy.push_back(PointerType::getUnqual(output->getType()));
  }

  LLVM_DEBUG({
    dbgs() << "Function type: " << *RetTy << " f(";
    for (Type *i : paramTy)
      dbgs() << *i << ", ";
    dbgs() << ")\n";
  });

  StructType *StructTy;
  if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
    StructTy = StructType::get(M->getContext(), paramTy);
    paramTy.clear();
    paramTy.push_back(PointerType::getUnqual(StructTy));
  }
  FunctionType *funcType =
      FunctionType::get(RetTy, paramTy,
                        AllowVarArgs && oldFunction->isVarArg());

  // Create the new function.
  Function *newFunction = Function::Create(
      funcType, GlobalValue::InternalLinkage, oldFunction->getAddressSpace(),
      oldFunction->getName() + "_" + header->getName(), M);
  // If the old function is no-throw, so is the new one.
  if (oldFunction->doesNotThrow())
    newFunction->setDoesNotThrow();

  // Inherit the uwtable attribute if we need to.
  if (oldFunction->hasUWTable())
    newFunction->setHasUWTable();

  // Inherit all of the target dependent attributes and white-listed
  // target independent attributes.
  // (e.g. If the extracted region contains a call to an x86.sse
  // instruction we need to make sure that the extracted region has the
  // "target-features" attribute allowing it to be lowered.)
  // FIXME: This should be changed to check to see if a specific
  // attribute cannot be inherited.
  for (const auto &Attr : oldFunction->getAttributes().getFnAttributes()) {
    if (Attr.isStringAttribute()) {
      if (Attr.getKindAsString() == "thunk")
        continue;
    } else
      switch (Attr.getKindAsEnum()) {
      // Those attributes cannot be propagated safely. Explicitly list them
      // here so we get a warning if new attributes are added. This list also
      // includes non-function attributes.
      case Attribute::Alignment:
      case Attribute::AllocSize:
      case Attribute::ArgMemOnly:
      case Attribute::Builtin:
      case Attribute::ByVal:
      case Attribute::Convergent:
      case Attribute::Dereferenceable:
      case Attribute::DereferenceableOrNull:
      case Attribute::InAlloca:
      case Attribute::InReg:
      case Attribute::InaccessibleMemOnly:
      case Attribute::InaccessibleMemOrArgMemOnly:
      case Attribute::JumpTable:
      case Attribute::Naked:
      case Attribute::Nest:
      case Attribute::NoAlias:
      case Attribute::NoBuiltin:
      case Attribute::NoCapture:
      case Attribute::NoReturn:
      case Attribute::None:
      case Attribute::NonNull:
      case Attribute::ReadNone:
      case Attribute::ReadOnly:
      case Attribute::Returned:
      case Attribute::ReturnsTwice:
      case Attribute::SExt:
      case Attribute::Speculatable:
      case Attribute::StackAlignment:
      case Attribute::StructRet:
      case Attribute::SwiftError:
      case Attribute::SwiftSelf:
      case Attribute::WriteOnly:
      case Attribute::ZExt:
      case Attribute::EndAttrKinds:
        continue;
      // Those attributes should be safe to propagate to the extracted
      // function.
      case Attribute::AlwaysInline:
      case Attribute::Cold:
      case Attribute::NoRecurse:
      case Attribute::InlineHint:
      case Attribute::MinSize:
      case Attribute::NoDuplicate:
      case Attribute::NoImplicitFloat:
      case Attribute::NoInline:
      case Attribute::NonLazyBind:
      case Attribute::NoRedZone:
      case Attribute::NoUnwind:
      case Attribute::OptForFuzzing:
      case Attribute::OptimizeNone:
      case Attribute::OptimizeForSize:
      case Attribute::SafeStack:
      case Attribute::ShadowCallStack:
      case Attribute::SanitizeAddress:
      case Attribute::SanitizeMemory:
      case Attribute::SanitizeThread:
      case Attribute::SanitizeHWAddress:
      case Attribute::SpeculativeLoadHardening:
      case Attribute::StackProtect:
      case Attribute::StackProtectReq:
      case Attribute::StackProtectStrong:
      case Attribute::StrictFP:
      case Attribute::UWTable:
      case Attribute::NoCfCheck:
        break;
      }

    newFunction->addFnAttr(Attr);
  }
  newFunction->getBasicBlockList().push_back(newRootNode);

  // Create an iterator to name all of the arguments we inserted.
  Function::arg_iterator AI = newFunction->arg_begin();

  // Rewrite all users of the inputs in the extracted region to use the
  // arguments (or appropriate addressing into struct) instead.
  for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
    Value *RewriteVal;
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(header->getContext()));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), i);
      Instruction *TI = newFunction->begin()->getTerminator();
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
      RewriteVal = new LoadInst(GEP, "loadgep_" + inputs[i]->getName(), TI);
    } else
      RewriteVal = &*AI++;

    std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
    for (User *use : Users)
      if (Instruction *inst = dyn_cast<Instruction>(use))
        if (Blocks.count(inst->getParent()))
          inst->replaceUsesOfWith(inputs[i], RewriteVal);
  }

  // Set names for input and output arguments.
  if (!AggregateArgs) {
    AI = newFunction->arg_begin();
    for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
      AI->setName(inputs[i]->getName());
    for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
      AI->setName(outputs[i]->getName()+".out");
  }

  // Rewrite branches to basic blocks outside of the loop to new dummy blocks
  // within the new function. This must be done before we lose track of which
  // blocks were originally in the code region.
  std::vector<User *> Users(header->user_begin(), header->user_end());
  for (unsigned i = 0, e = Users.size(); i != e; ++i)
    // The BasicBlock which contains the branch is not in the region; modify
    // the branch target to a new block.
    if (TerminatorInst *TI = dyn_cast<TerminatorInst>(Users[i]))
      if (!Blocks.count(TI->getParent()) &&
          TI->getParent()->getParent() == oldFunction)
        TI->replaceUsesOfWith(header, newHeader);

  return newFunction;
}

/// emitCallAndSwitchStatement - This method sets up the caller side by adding
/// the call instruction, splitting any PHI nodes in the header block as
/// necessary.
void CodeExtractor::
emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
                           ValueSet &inputs, ValueSet &outputs) {
  // Emit a call to the new function, passing in: *pointer to struct (if
  // aggregating parameters), or plain inputs and allocated memory for outputs.
  std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;

  Module *M = newFunction->getParent();
  LLVMContext &Context = M->getContext();
  const DataLayout &DL = M->getDataLayout();

  // Add inputs as params, or to be filled into the struct.
  for (Value *input : inputs)
    if (AggregateArgs)
      StructValues.push_back(input);
    else
      params.push_back(input);

  // Create allocas for the outputs.
  for (Value *output : outputs) {
    if (AggregateArgs) {
      StructValues.push_back(output);
    } else {
      AllocaInst *alloca =
          new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
                         nullptr, output->getName() + ".loc",
                         &codeReplacer->getParent()->front().front());
      ReloadOutputs.push_back(alloca);
      params.push_back(alloca);
    }
  }

  StructType *StructArgTy = nullptr;
  AllocaInst *Struct = nullptr;
  if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
    std::vector<Type *> ArgTypes;
    for (ValueSet::iterator v = StructValues.begin(),
           ve = StructValues.end(); v != ve; ++v)
      ArgTypes.push_back((*v)->getType());

    // Allocate a struct at the beginning of this function.
    StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
    Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
                            "structArg",
                            &codeReplacer->getParent()->front().front());
    params.push_back(Struct);

    for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
      codeReplacer->getInstList().push_back(GEP);
      StoreInst *SI = new StoreInst(StructValues[i], GEP);
      codeReplacer->getInstList().push_back(SI);
    }
  }

  // Emit the call to the function.
  CallInst *call = CallInst::Create(newFunction, params,
                                    NumExitBlocks > 1 ? "targetBlock" : "");
  // Add debug location to the new call, if the original function has debug
  // info. In that case, the terminator of the entry block of the extracted
  // function contains the first debug location of the extracted function,
  // set in extractCodeRegion.
  if (codeReplacer->getParent()->getSubprogram()) {
    if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
      call->setDebugLoc(DL);
  }
  codeReplacer->getInstList().push_back(call);

  Function::arg_iterator OutputArgBegin = newFunction->arg_begin();
  unsigned FirstOut = inputs.size();
  if (!AggregateArgs)
    std::advance(OutputArgBegin, inputs.size());

  // Reload the outputs passed in by reference.
  Function::arg_iterator OAI = OutputArgBegin;
  for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
    Value *Output = nullptr;
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, Struct, Idx, "gep_reload_" + outputs[i]->getName());
      codeReplacer->getInstList().push_back(GEP);
      Output = GEP;
    } else {
      Output = ReloadOutputs[i];
    }
    LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
    Reloads.push_back(load);
    codeReplacer->getInstList().push_back(load);
    std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
    for (unsigned u = 0, e = Users.size(); u != e; ++u) {
      Instruction *inst = cast<Instruction>(Users[u]);
      if (!Blocks.count(inst->getParent()))
        inst->replaceUsesOfWith(outputs[i], load);
    }

    // Store to the argument right after the definition of the output value.
    auto *OutI = dyn_cast<Instruction>(outputs[i]);
    if (!OutI)
      continue;

    // Find the proper insertion point.
    Instruction *InsertPt;
    // In case OutI is an invoke, we insert the store at the beginning of the
    // 'normal destination' BB. Otherwise we insert the store right after OutI.
    if (auto *InvokeI = dyn_cast<InvokeInst>(OutI))
      InsertPt = InvokeI->getNormalDest()->getFirstNonPHI();
    else
      InsertPt = OutI->getNextNode();

    // Assume that no non-PHI instructions are interleaved among the PHI nodes.
    if (isa<PHINode>(InsertPt))
      InsertPt = InsertPt->getParent()->getFirstNonPHI();

    assert(OAI != newFunction->arg_end() &&
           "Number of output arguments should match "
           "the amount of defined values");
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, &*OAI, Idx, "gep_" + outputs[i]->getName(), InsertPt);
      new StoreInst(outputs[i], GEP, InsertPt);
      // Since there should be only one struct argument aggregating
      // all the output values, we shouldn't increment OAI, which always
      // points to the struct argument, in this case.
    } else {
      new StoreInst(outputs[i], &*OAI, InsertPt);
      ++OAI;
    }
  }

  // Now we can emit a switch statement using the call as a value.
  SwitchInst *TheSwitch =
      SwitchInst::Create(Constant::getNullValue(Type::getInt16Ty(Context)),
                         codeReplacer, 0, codeReplacer);

  // Since there may be multiple exits from the original region, make the new
  // function return an unsigned, switch on that number. This loop iterates
  // over all of the blocks in the extracted region, updating any terminator
  // instructions in the to-be-extracted region that branch to blocks that are
  // not in the region to be extracted.
  std::map<BasicBlock *, BasicBlock *> ExitBlockMap;

  unsigned switchVal = 0;
  for (BasicBlock *Block : Blocks) {
    Instruction *TI = Block->getTerminator();
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      if (!Blocks.count(TI->getSuccessor(i))) {
        BasicBlock *OldTarget = TI->getSuccessor(i);
        // Add a new basic block which returns the appropriate value.
        BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
        if (!NewTarget) {
          // If we don't already have an exit stub for this non-extracted
          // destination, create one now!
          NewTarget = BasicBlock::Create(Context,
                                         OldTarget->getName() + ".exitStub",
                                         newFunction);
          unsigned SuccNum = switchVal++;

          Value *brVal = nullptr;
          switch (NumExitBlocks) {
          case 0:
          case 1: break;  // No value needed.
          case 2:         // Conditional branch, return a bool.
            brVal = ConstantInt::get(Type::getInt1Ty(Context), !SuccNum);
            break;
          default:
            brVal = ConstantInt::get(Type::getInt16Ty(Context), SuccNum);
            break;
          }

          ReturnInst::Create(Context, brVal, NewTarget);

          // Update the switch instruction.
          TheSwitch->addCase(ConstantInt::get(Type::getInt16Ty(Context),
                                              SuccNum),
                             OldTarget);
        }

        // Rewrite the original branch instruction with this new target.
        TI->setSuccessor(i, NewTarget);
      }
  }

  // Now that we've done the deed, simplify the switch instruction.
  Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
  switch (NumExitBlocks) {
  case 0:
    // There are no successors (the block containing the switch itself), which
    // means that previously this was the last part of the function, and hence
    // this should be rewritten as a `ret'.

    // Check if the function should return a value.
    if (OldFnRetTy->isVoidTy()) {
      ReturnInst::Create(Context, nullptr, TheSwitch);  // Return void
    } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
      // Return what we have.
      ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
    } else {
      // Otherwise we must have code extracted an unwind or something, just
      // return whatever we want.
      ReturnInst::Create(Context,
                         Constant::getNullValue(OldFnRetTy), TheSwitch);
    }

    TheSwitch->eraseFromParent();
    break;
  case 1:
    // Only a single destination, change the switch into an unconditional
    // branch.
    BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch);
    TheSwitch->eraseFromParent();
    break;
  case 2:
    BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch->getSuccessor(2),
                       call, TheSwitch);
    TheSwitch->eraseFromParent();
    break;
  default:
    // Otherwise, make the default destination of the switch instruction be one
    // of the other successors.
    TheSwitch->setCondition(call);
    TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
    // Remove the redundant case.
    TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
    break;
  }
}

void CodeExtractor::moveCodeToFunction(Function *newFunction) {
  Function *oldFunc = (*Blocks.begin())->getParent();
  Function::BasicBlockListType &oldBlocks = oldFunc->getBasicBlockList();
  Function::BasicBlockListType &newBlocks = newFunction->getBasicBlockList();

  for (BasicBlock *Block : Blocks) {
    // Delete the basic block from the old function, and the list of blocks.
    oldBlocks.remove(Block);

    // Insert this basic block into the new function.
    newBlocks.push_back(Block);
  }
}

void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
  using Distribution = BlockFrequencyInfoImplBase::Distribution;
  using BlockNode = BlockFrequencyInfoImplBase::BlockNode;

  // Update the branch weights for the exit block.
  Instruction *TI = CodeReplacer->getTerminator();
  SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);

  // Block frequency distribution with dummy node.
  Distribution BranchDist;

  // Add each of the frequencies of the successors.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
    BlockNode ExitNode(i);
    uint64_t ExitFreq = ExitWeights[TI->getSuccessor(i)].getFrequency();
    if (ExitFreq != 0)
      BranchDist.addExit(ExitNode, ExitFreq);
    else
      BPI->setEdgeProbability(CodeReplacer, i, BranchProbability::getZero());
  }

  // Check for no total weight.
  if (BranchDist.Total == 0)
    return;

  // Normalize the distribution so that the weights can fit in unsigned.
  BranchDist.normalize();

  // Create normalized branch weights and set the metadata.
  for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
    const auto &Weight = BranchDist.Weights[I];

    // Get the weight and update the current BFI.
    BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
    BranchProbability BP(Weight.Amount, BranchDist.Total);
    BPI->setEdgeProbability(CodeReplacer, Weight.TargetNode.Index, BP);
  }
  TI->setMetadata(
      LLVMContext::MD_prof,
      MDBuilder(TI->getContext()).createBranchWeights(BranchWeights));
}

Function *CodeExtractor::extractCodeRegion() {
  if (!isEligible())
    return nullptr;

  // Assumption: this is a single-entry code region, and the header is the
  // first block in the region.
  BasicBlock *header = *Blocks.begin();
  Function *oldFunction = header->getParent();

  // For functions with varargs, check that varargs handling is only done in
  // the outlined function, i.e. vastart and vaend are only used in outlined
  // blocks.
  if (AllowVarArgs && oldFunction->getFunctionType()->isVarArg()) {
    auto containsVarArgIntrinsic = [](Instruction &I) {
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (const Function *F = CI->getCalledFunction())
          return F->getIntrinsicID() == Intrinsic::vastart ||
                 F->getIntrinsicID() == Intrinsic::vaend;
      return false;
    };

    for (auto &BB : *oldFunction) {
      if (Blocks.count(&BB))
        continue;
      if (llvm::any_of(BB, containsVarArgIntrinsic))
        return nullptr;
    }
  }
  ValueSet inputs, outputs, SinkingCands, HoistingCands;
  BasicBlock *CommonExit = nullptr;

  // Calculate the entry frequency of the new function before we change the
  // root block.
  BlockFrequency EntryFreq;
  if (BFI) {
    assert(BPI && "Both BPI and BFI are required to preserve profile info");
    for (BasicBlock *Pred : predecessors(header)) {
      if (Blocks.count(Pred))
        continue;
      EntryFreq +=
          BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, header);
    }
  }

  // If we have to split PHI nodes or the entry block, do so now.
  severSplitPHINodes(header);

  // If we have any return instructions in the region, split those blocks so
  // that the return is not in the region.
  splitReturnBlocks();

  // This takes the place of the original loop.
  BasicBlock *codeReplacer = BasicBlock::Create(header->getContext(),
                                                "codeRepl", oldFunction,
                                                header);

  // The new function needs a root node because other nodes can branch to the
  // head of the region, but the entry node of a function cannot have preds.
  BasicBlock *newFuncRoot = BasicBlock::Create(header->getContext(),
                                               "newFuncRoot");
  auto *BranchI = BranchInst::Create(header);
  // If the original function has debug info, we have to add a debug location
  // to the new branch instruction from the artificial entry block.
  // We use the debug location of the first instruction in the extracted
  // blocks, as there is no other equivalent line in the source code.
  if (oldFunction->getSubprogram()) {
    any_of(Blocks, [&BranchI](const BasicBlock *BB) {
      return any_of(*BB, [&BranchI](const Instruction &I) {
        if (!I.getDebugLoc())
          return false;
        BranchI->setDebugLoc(I.getDebugLoc());
        return true;
      });
    });
  }
  newFuncRoot->getInstList().push_back(BranchI);

  findAllocas(SinkingCands, HoistingCands, CommonExit);
  assert(HoistingCands.empty() || CommonExit);

  // Find inputs to, outputs from the code region.
  findInputsOutputs(inputs, outputs, SinkingCands);

  // Now sink all instructions which only have non-phi uses inside the region.
  for (auto *II : SinkingCands)
    cast<Instruction>(II)->moveBefore(*newFuncRoot,
                                      newFuncRoot->getFirstInsertionPt());

  if (!HoistingCands.empty()) {
    auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExit);
    Instruction *TI = HoistToBlock->getTerminator();
    for (auto *II : HoistingCands)
      cast<Instruction>(II)->moveBefore(TI);
  }

  // Calculate the exit blocks for the extracted region and the total exit
  // weights for each of those blocks.
  DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
  SmallPtrSet<BasicBlock *, 1> ExitBlocks;
  for (BasicBlock *Block : Blocks) {
    for (succ_iterator SI = succ_begin(Block), SE = succ_end(Block); SI != SE;
         ++SI) {
      if (!Blocks.count(*SI)) {
        // Update the branch weight for this successor.
        if (BFI) {
          BlockFrequency &BF = ExitWeights[*SI];
          BF += BFI->getBlockFreq(Block) * BPI->getEdgeProbability(Block, *SI);
        }
        ExitBlocks.insert(*SI);
      }
    }
  }
  NumExitBlocks = ExitBlocks.size();

  // Construct new function based on inputs/outputs & add allocas for all defs.
  Function *newFunction = constructFunction(inputs, outputs, header,
                                            newFuncRoot,
                                            codeReplacer, oldFunction,
                                            oldFunction->getParent());

  // Update the entry count of the function.
  if (BFI) {
    auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
    if (Count.hasValue())
      newFunction->setEntryCount(
          ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME
    BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
  }

  emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);

  moveCodeToFunction(newFunction);

  // Propagate personality info to the new function if there is one.
  if (oldFunction->hasPersonalityFn())
    newFunction->setPersonalityFn(oldFunction->getPersonalityFn());

  // Update the branch weights for the exit block.
  if (BFI && NumExitBlocks > 1)
    calculateNewCallTerminatorWeights(codeReplacer, ExitWeights, BPI);

  // Loop over all of the PHI nodes in the header block, and change any
  // references to the old incoming edge to be the new incoming edge.
  for (BasicBlock::iterator I = header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!Blocks.count(PN->getIncomingBlock(i)))
        PN->setIncomingBlock(i, newFuncRoot);
  }

  // Look at all successors of the codeReplacer block. If any of these blocks
  // had PHI nodes in them, we need to update the "from" block to be the code
  // replacer, not the original block in the extracted region.
  std::vector<BasicBlock *> Succs(succ_begin(codeReplacer),
                                  succ_end(codeReplacer));
  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    for (BasicBlock::iterator I = Succs[i]->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      std::set<BasicBlock*> ProcessedPreds;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (Blocks.count(PN->getIncomingBlock(i))) {
          if (ProcessedPreds.insert(PN->getIncomingBlock(i)).second)
            PN->setIncomingBlock(i, codeReplacer);
          else {
            // There were multiple entries in the PHI for this block, now there
            // is only one, so remove the duplicated entries.
            PN->removeIncomingValue(i, false);
            --i; --e;
          }
        }
    }

  // Erase debug info intrinsics. Variable updates within the new function are
  // invisible to debuggers. This could be improved by defining a DISubprogram
  // for the new function.
  for (BasicBlock &BB : *newFunction) {
    auto BlockIt = BB.begin();
    while (BlockIt != BB.end()) {
      Instruction *Inst = &*BlockIt;
      ++BlockIt;
      if (isa<DbgInfoIntrinsic>(Inst))
        Inst->eraseFromParent();
    }
  }

  LLVM_DEBUG(if (verifyFunction(*newFunction))
                 report_fatal_error("verifyFunction failed!"));
  return newFunction;
}