1 //===- CodeExtractor.cpp - Pull code region into a new function -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interface to tear out a code region, such as an
10 // individual loop or a parallel section, into a new function, replacing it with
11 // a call to the new function.
12 //
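// A rough usage sketch (illustrative only; the variable names and the
// analyses passed in here are assumptions of this example, not requirements):
//
//   CodeExtractorAnalysisCache CEAC(F);
//   CodeExtractor CE(BlocksToExtract, /*DT=*/&DT);
//   if (CE.isEligible())
//     if (Function *Outlined = CE.extractCodeRegion(CEAC))
//       ...; // The region now lives in Outlined, replaced by a call to it.
//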
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/Transforms/Utils/CodeExtractor.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/Optional.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/BlockFrequencyInfo.h"
25 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/Analysis/LoopInfo.h"
28 #include "llvm/IR/Argument.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DIBuilder.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DebugInfoMetadata.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalValue.h"
41 #include "llvm/IR/InstIterator.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/LLVMContext.h"
48 #include "llvm/IR/MDBuilder.h"
49 #include "llvm/IR/Module.h"
50 #include "llvm/IR/PatternMatch.h"
51 #include "llvm/IR/Type.h"
52 #include "llvm/IR/User.h"
53 #include "llvm/IR/Value.h"
54 #include "llvm/IR/Verifier.h"
55 #include "llvm/Pass.h"
56 #include "llvm/Support/BlockFrequency.h"
57 #include "llvm/Support/BranchProbability.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
64 #include "llvm/Transforms/Utils/Local.h"
65 #include <cassert>
66 #include <cstdint>
67 #include <iterator>
68 #include <map>
69 #include <set>
70 #include <utility>
71 #include <vector>
72
73 using namespace llvm;
74 using namespace llvm::PatternMatch;
75 using ProfileCount = Function::ProfileCount;
76
77 #define DEBUG_TYPE "code-extractor"
78
79 // Provide a command-line option to aggregate function arguments into a struct
80 // for functions produced by the code extractor. This is useful when converting
81 // extracted functions to pthread-based code, as only one argument (void*) can
82 // be passed in to pthread_create().
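//
// For example (a hypothetical invocation; any pass that drives the extractor
// honors the flag, since it is OR'd into AggregateArgs below):
//   opt -passes=hotcoldsplit -aggregate-extracted-args input.ll -S -o out.ll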
83 static cl::opt<bool>
84 AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
85 cl::desc("Aggregate arguments to code-extracted functions"));
86
87 /// Test whether a block is valid for extraction.
static bool isBlockValidForExtraction(const BasicBlock &BB,
                                      const SetVector<BasicBlock *> &Result,
                                      bool AllowVarArgs, bool AllowAlloca) {
  // Taking the address of a basic block moved to another function is illegal.
  if (BB.hasAddressTaken())
    return false;

  // Don't hoist code that uses another basic block's address, as it's likely
  // to lead to unexpected behavior, such as cross-function jumps.
97 SmallPtrSet<User const *, 16> Visited;
98 SmallVector<User const *, 16> ToVisit;
99
100 for (Instruction const &Inst : BB)
101 ToVisit.push_back(&Inst);
102
103 while (!ToVisit.empty()) {
104 User const *Curr = ToVisit.pop_back_val();
105 if (!Visited.insert(Curr).second)
106 continue;
    if (isa<BlockAddress const>(Curr))
      return false; // Even a self-reference is likely to be incompatible.
109
110 if (isa<Instruction>(Curr) && cast<Instruction>(Curr)->getParent() != &BB)
111 continue;
112
113 for (auto const &U : Curr->operands()) {
114 if (auto *UU = dyn_cast<User>(U))
115 ToVisit.push_back(UU);
116 }
117 }
118
  // If explicitly requested, allow va_start and alloca. For invoke
  // instructions, verify that extraction is valid.
121 for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
122 if (isa<AllocaInst>(I)) {
123 if (!AllowAlloca)
124 return false;
125 continue;
126 }
127
128 if (const auto *II = dyn_cast<InvokeInst>(I)) {
129 // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
130 // must be a part of the subgraph which is being extracted.
131 if (auto *UBB = II->getUnwindDest())
132 if (!Result.count(UBB))
133 return false;
134 continue;
135 }
136
137 // All catch handlers of a catchswitch instruction as well as the unwind
138 // destination must be in the subgraph.
139 if (const auto *CSI = dyn_cast<CatchSwitchInst>(I)) {
140 if (auto *UBB = CSI->getUnwindDest())
141 if (!Result.count(UBB))
142 return false;
143 for (auto *HBB : CSI->handlers())
144 if (!Result.count(const_cast<BasicBlock*>(HBB)))
145 return false;
146 continue;
147 }
148
    // Make sure that the entire catch handler is within the subgraph. It is
    // sufficient to check that the catch return's block is in the list.
151 if (const auto *CPI = dyn_cast<CatchPadInst>(I)) {
152 for (const auto *U : CPI->users())
153 if (const auto *CRI = dyn_cast<CatchReturnInst>(U))
154 if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
155 return false;
156 continue;
157 }
158
    // Do similar checks for the cleanup handler: the entire handler must be in
    // the subgraph which is going to be extracted. For a cleanup return we
    // additionally check that the unwind destination is also in the subgraph.
162 if (const auto *CPI = dyn_cast<CleanupPadInst>(I)) {
163 for (const auto *U : CPI->users())
164 if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
165 if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
166 return false;
167 continue;
168 }
169 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) {
170 if (auto *UBB = CRI->getUnwindDest())
171 if (!Result.count(UBB))
172 return false;
173 continue;
174 }
175
176 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
177 if (const Function *F = CI->getCalledFunction()) {
178 auto IID = F->getIntrinsicID();
179 if (IID == Intrinsic::vastart) {
180 if (AllowVarArgs)
181 continue;
182 else
183 return false;
184 }
185
186 // Currently, we miscompile outlined copies of eh_typid_for. There are
187 // proposals for fixing this in llvm.org/PR39545.
188 if (IID == Intrinsic::eh_typeid_for)
189 return false;
190 }
191 }
192 }
193
194 return true;
195 }
196
197 /// Build a set of blocks to extract if the input blocks are viable.
static SetVector<BasicBlock *>
buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                        bool AllowVarArgs, bool AllowAlloca) {
201 assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
202 SetVector<BasicBlock *> Result;
203
204 // Loop over the blocks, adding them to our set-vector, and aborting with an
205 // empty set if we encounter invalid blocks.
206 for (BasicBlock *BB : BBs) {
207 // If this block is dead, don't process it.
208 if (DT && !DT->isReachableFromEntry(BB))
209 continue;
210
211 if (!Result.insert(BB))
212 llvm_unreachable("Repeated basic blocks in extraction input");
213 }
214
215 LLVM_DEBUG(dbgs() << "Region front block: " << Result.front()->getName()
216 << '\n');
217
218 for (auto *BB : Result) {
219 if (!isBlockValidForExtraction(*BB, Result, AllowVarArgs, AllowAlloca))
220 return {};
221
222 // Make sure that the first block is not a landing pad.
223 if (BB == Result.front()) {
224 if (BB->isEHPad()) {
225 LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
226 return {};
227 }
228 continue;
229 }
230
231 // All blocks other than the first must not have predecessors outside of
232 // the subgraph which is being extracted.
233 for (auto *PBB : predecessors(BB))
234 if (!Result.count(PBB)) {
235 LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "
236 "outside the region except for the first block!\n"
237 << "Problematic source BB: " << BB->getName() << "\n"
238 << "Problematic destination BB: " << PBB->getName()
239 << "\n");
240 return {};
241 }
242 }
243
244 return Result;
245 }
246
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                             bool AggregateArgs, BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, AssumptionCache *AC,
                             bool AllowVarArgs, bool AllowAlloca,
                             std::string Suffix)
252 : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
253 BPI(BPI), AC(AC), AllowVarArgs(AllowVarArgs),
254 Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)),
255 Suffix(Suffix) {}
256
CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
                             BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, AssumptionCache *AC,
                             std::string Suffix)
261 : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
262 BPI(BPI), AC(AC), AllowVarArgs(false),
263 Blocks(buildExtractionBlockSet(L.getBlocks(), &DT,
264 /* AllowVarArgs */ false,
265 /* AllowAlloca */ false)),
266 Suffix(Suffix) {}
267
268 /// definedInRegion - Return true if the specified value is defined in the
269 /// extracted region.
static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
271 if (Instruction *I = dyn_cast<Instruction>(V))
272 if (Blocks.count(I->getParent()))
273 return true;
274 return false;
275 }
276
277 /// definedInCaller - Return true if the specified value is defined in the
278 /// function being code extracted, but not in the region being extracted.
279 /// These values must be passed in as live-ins to the function.
static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
281 if (isa<Argument>(V)) return true;
282 if (Instruction *I = dyn_cast<Instruction>(V))
283 if (!Blocks.count(I->getParent()))
284 return true;
285 return false;
286 }
287
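/// Return the unique basic block outside of \p Blocks that every edge leaving
/// the region targets, or null if the region exits to more than one block.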
static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
289 BasicBlock *CommonExitBlock = nullptr;
290 auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
291 for (auto *Succ : successors(Block)) {
292 // Internal edges, ok.
293 if (Blocks.count(Succ))
294 continue;
295 if (!CommonExitBlock) {
296 CommonExitBlock = Succ;
297 continue;
298 }
299 if (CommonExitBlock != Succ)
300 return true;
301 }
302 return false;
303 };
304
305 if (any_of(Blocks, hasNonCommonExitSucc))
306 return nullptr;
307
308 return CommonExitBlock;
309 }
310
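/// Gather the allocas of \p F and pre-compute, per block, which stack
/// addresses may be accessed, so that later extraction queries do not have to
/// rescan the whole function.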
CodeExtractorAnalysisCache::CodeExtractorAnalysisCache(Function &F) {
312 for (BasicBlock &BB : F) {
313 for (Instruction &II : BB.instructionsWithoutDebug())
314 if (auto *AI = dyn_cast<AllocaInst>(&II))
315 Allocas.push_back(AI);
316
317 findSideEffectInfoForBlock(BB);
318 }
319 }
320
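/// Record which alloca-based addresses \p BB may load from or store to; if a
/// memory access (or any other side effect) cannot be attributed to a specific
/// alloca, conservatively mark the whole block as side-effecting.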
void CodeExtractorAnalysisCache::findSideEffectInfoForBlock(BasicBlock &BB) {
322 for (Instruction &II : BB.instructionsWithoutDebug()) {
323 unsigned Opcode = II.getOpcode();
324 Value *MemAddr = nullptr;
325 switch (Opcode) {
326 case Instruction::Store:
327 case Instruction::Load: {
328 if (Opcode == Instruction::Store) {
329 StoreInst *SI = cast<StoreInst>(&II);
330 MemAddr = SI->getPointerOperand();
331 } else {
332 LoadInst *LI = cast<LoadInst>(&II);
333 MemAddr = LI->getPointerOperand();
334 }
      // A global variable cannot alias locals.
336 if (isa<Constant>(MemAddr))
337 break;
338 Value *Base = MemAddr->stripInBoundsConstantOffsets();
339 if (!isa<AllocaInst>(Base)) {
340 SideEffectingBlocks.insert(&BB);
341 return;
342 }
343 BaseMemAddrs[&BB].insert(Base);
344 break;
345 }
346 default: {
347 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
348 if (IntrInst) {
349 if (IntrInst->isLifetimeStartOrEnd())
350 break;
351 SideEffectingBlocks.insert(&BB);
352 return;
353 }
354 // Treat all the other cases conservatively if it has side effects.
355 if (II.mayHaveSideEffects()) {
356 SideEffectingBlocks.insert(&BB);
357 return;
358 }
359 }
360 }
361 }
362 }
363
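/// Return true if \p BB was marked as side-effecting, or if it is known to
/// access memory through the alloca \p Addr.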
bool CodeExtractorAnalysisCache::doesBlockContainClobberOfAddr(
    BasicBlock &BB, AllocaInst *Addr) const {
366 if (SideEffectingBlocks.count(&BB))
367 return true;
368 auto It = BaseMemAddrs.find(&BB);
369 if (It != BaseMemAddrs.end())
370 return It->second.count(Addr);
371 return false;
372 }
373
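/// Shrinkwrapping lifetime markers for \p Addr into the region is only legal
/// if no block outside of the region may clobber the underlying alloca.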
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    const CodeExtractorAnalysisCache &CEAC, Instruction *Addr) const {
376 AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
377 Function *Func = (*Blocks.begin())->getParent();
378 for (BasicBlock &BB : *Func) {
379 if (Blocks.count(&BB))
380 continue;
381 if (CEAC.doesBlockContainClobberOfAddr(BB, AI))
382 return false;
383 }
384 return true;
385 }
386
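/// Return a block through which every exit of the region into
/// \p CommonExitBlock passes: either the unique in-region predecessor, or the
/// old exit block itself after it has been split and absorbed into the region.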
BasicBlock *
CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
389 BasicBlock *SinglePredFromOutlineRegion = nullptr;
390 assert(!Blocks.count(CommonExitBlock) &&
391 "Expect a block outside the region!");
392 for (auto *Pred : predecessors(CommonExitBlock)) {
393 if (!Blocks.count(Pred))
394 continue;
395 if (!SinglePredFromOutlineRegion) {
396 SinglePredFromOutlineRegion = Pred;
397 } else if (SinglePredFromOutlineRegion != Pred) {
398 SinglePredFromOutlineRegion = nullptr;
399 break;
400 }
401 }
402
403 if (SinglePredFromOutlineRegion)
404 return SinglePredFromOutlineRegion;
405
406 #ifndef NDEBUG
407 auto getFirstPHI = [](BasicBlock *BB) {
408 BasicBlock::iterator I = BB->begin();
409 PHINode *FirstPhi = nullptr;
410 while (I != BB->end()) {
411 PHINode *Phi = dyn_cast<PHINode>(I);
412 if (!Phi)
413 break;
414 if (!FirstPhi) {
415 FirstPhi = Phi;
416 break;
417 }
418 }
419 return FirstPhi;
420 };
  // If there are any phi nodes, the single pred either exists or has already
  // been created before code extraction.
423 assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
424 #endif
425
426 BasicBlock *NewExitBlock = CommonExitBlock->splitBasicBlock(
427 CommonExitBlock->getFirstNonPHI()->getIterator());
428
429 for (BasicBlock *Pred :
430 llvm::make_early_inc_range(predecessors(CommonExitBlock))) {
431 if (Blocks.count(Pred))
432 continue;
433 Pred->getTerminator()->replaceUsesOfWith(CommonExitBlock, NewExitBlock);
434 }
435 // Now add the old exit block to the outline region.
436 Blocks.insert(CommonExitBlock);
437 return CommonExitBlock;
438 }
439
// Find the pair of lifetime markers for address 'Addr' that are either defined
// inside the outline region or can legally be shrinkwrapped into the outline
// region. If there are no other untracked uses of the address, return the pair
// of markers if found; otherwise return a pair of nullptr.
CodeExtractor::LifetimeMarkerInfo
CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
                                  Instruction *Addr,
                                  BasicBlock *ExitBlock) const {
448 LifetimeMarkerInfo Info;
449
450 for (User *U : Addr->users()) {
451 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(U);
452 if (IntrInst) {
453 // We don't model addresses with multiple start/end markers, but the
454 // markers do not need to be in the region.
455 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
456 if (Info.LifeStart)
457 return {};
458 Info.LifeStart = IntrInst;
459 continue;
460 }
461 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
462 if (Info.LifeEnd)
463 return {};
464 Info.LifeEnd = IntrInst;
465 continue;
466 }
467 // At this point, permit debug uses outside of the region.
468 // This is fixed in a later call to fixupDebugInfoPostExtraction().
469 if (isa<DbgInfoIntrinsic>(IntrInst))
470 continue;
471 }
    // Any other use of the address is untracked; if it lives outside of the
    // region, bail.
    if (!definedInRegion(Blocks, U))
      return {};
475 }
476
477 if (!Info.LifeStart || !Info.LifeEnd)
478 return {};
479
480 Info.SinkLifeStart = !definedInRegion(Blocks, Info.LifeStart);
481 Info.HoistLifeEnd = !definedInRegion(Blocks, Info.LifeEnd);
482 // Do legality check.
483 if ((Info.SinkLifeStart || Info.HoistLifeEnd) &&
484 !isLegalToShrinkwrapLifetimeMarkers(CEAC, Addr))
485 return {};
486
487 // Check to see if we have a place to do hoisting, if not, bail.
488 if (Info.HoistLifeEnd && !ExitBlock)
489 return {};
490
491 return Info;
492 }
493
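/// Inspect the allocas of the enclosing function and collect the values that
/// can be sunk into the extracted region (SinkCands) as well as the
/// lifetime.end markers that need to be hoisted to the common exit block
/// (HoistCands).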
void CodeExtractor::findAllocas(const CodeExtractorAnalysisCache &CEAC,
                                ValueSet &SinkCands, ValueSet &HoistCands,
                                BasicBlock *&ExitBlock) const {
497 Function *Func = (*Blocks.begin())->getParent();
498 ExitBlock = getCommonExitBlock(Blocks);
499
500 auto moveOrIgnoreLifetimeMarkers =
501 [&](const LifetimeMarkerInfo &LMI) -> bool {
502 if (!LMI.LifeStart)
503 return false;
504 if (LMI.SinkLifeStart) {
505 LLVM_DEBUG(dbgs() << "Sinking lifetime.start: " << *LMI.LifeStart
506 << "\n");
507 SinkCands.insert(LMI.LifeStart);
508 }
509 if (LMI.HoistLifeEnd) {
510 LLVM_DEBUG(dbgs() << "Hoisting lifetime.end: " << *LMI.LifeEnd << "\n");
511 HoistCands.insert(LMI.LifeEnd);
512 }
513 return true;
514 };
515
516 // Look up allocas in the original function in CodeExtractorAnalysisCache, as
517 // this is much faster than walking all the instructions.
518 for (AllocaInst *AI : CEAC.getAllocas()) {
519 BasicBlock *BB = AI->getParent();
520 if (Blocks.count(BB))
521 continue;
522
523 // As a prior call to extractCodeRegion() may have shrinkwrapped the alloca,
524 // check whether it is actually still in the original function.
525 Function *AIFunc = BB->getParent();
526 if (AIFunc != Func)
527 continue;
528
529 LifetimeMarkerInfo MarkerInfo = getLifetimeMarkers(CEAC, AI, ExitBlock);
530 bool Moved = moveOrIgnoreLifetimeMarkers(MarkerInfo);
531 if (Moved) {
532 LLVM_DEBUG(dbgs() << "Sinking alloca: " << *AI << "\n");
533 SinkCands.insert(AI);
534 continue;
535 }
536
537 // Find bitcasts in the outlined region that have lifetime marker users
538 // outside that region. Replace the lifetime marker use with an
539 // outside region bitcast to avoid unnecessary alloca/reload instructions
540 // and extra lifetime markers.
541 SmallVector<Instruction *, 2> LifetimeBitcastUsers;
542 for (User *U : AI->users()) {
543 if (!definedInRegion(Blocks, U))
544 continue;
545
546 if (U->stripInBoundsConstantOffsets() != AI)
547 continue;
548
549 Instruction *Bitcast = cast<Instruction>(U);
550 for (User *BU : Bitcast->users()) {
551 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(BU);
552 if (!IntrInst)
553 continue;
554
555 if (!IntrInst->isLifetimeStartOrEnd())
556 continue;
557
558 if (definedInRegion(Blocks, IntrInst))
559 continue;
560
561 LLVM_DEBUG(dbgs() << "Replace use of extracted region bitcast"
562 << *Bitcast << " in out-of-region lifetime marker "
563 << *IntrInst << "\n");
564 LifetimeBitcastUsers.push_back(IntrInst);
565 }
566 }
567
568 for (Instruction *I : LifetimeBitcastUsers) {
569 Module *M = AIFunc->getParent();
570 LLVMContext &Ctx = M->getContext();
571 auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
572 CastInst *CastI =
573 CastInst::CreatePointerCast(AI, Int8PtrTy, "lt.cast", I);
574 I->replaceUsesOfWith(I->getOperand(1), CastI);
575 }
576
577 // Follow any bitcasts.
578 SmallVector<Instruction *, 2> Bitcasts;
579 SmallVector<LifetimeMarkerInfo, 2> BitcastLifetimeInfo;
580 for (User *U : AI->users()) {
581 if (U->stripInBoundsConstantOffsets() == AI) {
582 Instruction *Bitcast = cast<Instruction>(U);
583 LifetimeMarkerInfo LMI = getLifetimeMarkers(CEAC, Bitcast, ExitBlock);
584 if (LMI.LifeStart) {
585 Bitcasts.push_back(Bitcast);
586 BitcastLifetimeInfo.push_back(LMI);
587 continue;
588 }
589 }
590
591 // Found unknown use of AI.
592 if (!definedInRegion(Blocks, U)) {
593 Bitcasts.clear();
594 break;
595 }
596 }
597
598 // Either no bitcasts reference the alloca or there are unknown uses.
599 if (Bitcasts.empty())
600 continue;
601
602 LLVM_DEBUG(dbgs() << "Sinking alloca (via bitcast): " << *AI << "\n");
603 SinkCands.insert(AI);
604 for (unsigned I = 0, E = Bitcasts.size(); I != E; ++I) {
605 Instruction *BitcastAddr = Bitcasts[I];
606 const LifetimeMarkerInfo &LMI = BitcastLifetimeInfo[I];
607 assert(LMI.LifeStart &&
608 "Unsafe to sink bitcast without lifetime markers");
609 moveOrIgnoreLifetimeMarkers(LMI);
610 if (!definedInRegion(Blocks, BitcastAddr)) {
611 LLVM_DEBUG(dbgs() << "Sinking bitcast-of-alloca: " << *BitcastAddr
612 << "\n");
613 SinkCands.insert(BitcastAddr);
614 }
615 }
616 }
617 }
618
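/// A region is eligible for extraction if it is non-empty and, when varargs
/// are allowed, all va_start/va_end handling happens inside the region.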
bool CodeExtractor::isEligible() const {
620 if (Blocks.empty())
621 return false;
622 BasicBlock *Header = *Blocks.begin();
623 Function *F = Header->getParent();
624
  // For functions with varargs, check that varargs handling is only done in
  // the outlined function, i.e. va_start and va_end are only used in outlined
  // blocks.
627 if (AllowVarArgs && F->getFunctionType()->isVarArg()) {
628 auto containsVarArgIntrinsic = [](const Instruction &I) {
629 if (const CallInst *CI = dyn_cast<CallInst>(&I))
630 if (const Function *Callee = CI->getCalledFunction())
631 return Callee->getIntrinsicID() == Intrinsic::vastart ||
632 Callee->getIntrinsicID() == Intrinsic::vaend;
633 return false;
634 };
635
636 for (auto &BB : *F) {
637 if (Blocks.count(&BB))
638 continue;
639 if (llvm::any_of(BB, containsVarArgIntrinsic))
640 return false;
641 }
642 }
643 return true;
644 }
645
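/// Compute the values that are live into the region (Inputs) and the region
/// instructions that are used outside of it (Outputs), ignoring values that
/// are about to be sunk into the new function (SinkCands).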
void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
                                      const ValueSet &SinkCands) const {
648 for (BasicBlock *BB : Blocks) {
649 // If a used value is defined outside the region, it's an input. If an
650 // instruction is used outside the region, it's an output.
651 for (Instruction &II : *BB) {
652 for (auto &OI : II.operands()) {
653 Value *V = OI;
654 if (!SinkCands.count(V) && definedInCaller(Blocks, V))
655 Inputs.insert(V);
656 }
657
658 for (User *U : II.users())
659 if (!definedInRegion(Blocks, U)) {
660 Outputs.insert(&II);
661 break;
662 }
663 }
664 }
665 }
666
667 /// severSplitPHINodesOfEntry - If a PHI node has multiple inputs from outside
668 /// of the region, we need to split the entry block of the region so that the
669 /// PHI node is easier to deal with.
void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
671 unsigned NumPredsFromRegion = 0;
672 unsigned NumPredsOutsideRegion = 0;
673
674 if (Header != &Header->getParent()->getEntryBlock()) {
675 PHINode *PN = dyn_cast<PHINode>(Header->begin());
676 if (!PN) return; // No PHI nodes.
677
678 // If the header node contains any PHI nodes, check to see if there is more
679 // than one entry from outside the region. If so, we need to sever the
680 // header block into two.
681 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
682 if (Blocks.count(PN->getIncomingBlock(i)))
683 ++NumPredsFromRegion;
684 else
685 ++NumPredsOutsideRegion;
686
687 // If there is one (or fewer) predecessor from outside the region, we don't
688 // need to do anything special.
689 if (NumPredsOutsideRegion <= 1) return;
690 }
691
692 // Otherwise, we need to split the header block into two pieces: one
693 // containing PHI nodes merging values from outside of the region, and a
694 // second that contains all of the code for the block and merges back any
695 // incoming values from inside of the region.
696 BasicBlock *NewBB = SplitBlock(Header, Header->getFirstNonPHI(), DT);
697
698 // We only want to code extract the second block now, and it becomes the new
699 // header of the region.
700 BasicBlock *OldPred = Header;
701 Blocks.remove(OldPred);
702 Blocks.insert(NewBB);
703 Header = NewBB;
704
705 // Okay, now we need to adjust the PHI nodes and any branches from within the
706 // region to go to the new header block instead of the old header block.
707 if (NumPredsFromRegion) {
708 PHINode *PN = cast<PHINode>(OldPred->begin());
709 // Loop over all of the predecessors of OldPred that are in the region,
710 // changing them to branch to NewBB instead.
711 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
712 if (Blocks.count(PN->getIncomingBlock(i))) {
713 Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
714 TI->replaceUsesOfWith(OldPred, NewBB);
715 }
716
717 // Okay, everything within the region is now branching to the right block, we
718 // just have to update the PHI nodes now, inserting PHI nodes into NewBB.
719 BasicBlock::iterator AfterPHIs;
720 for (AfterPHIs = OldPred->begin(); isa<PHINode>(AfterPHIs); ++AfterPHIs) {
721 PHINode *PN = cast<PHINode>(AfterPHIs);
722 // Create a new PHI node in the new region, which has an incoming value
723 // from OldPred of PN.
724 PHINode *NewPN = PHINode::Create(PN->getType(), 1 + NumPredsFromRegion,
725 PN->getName() + ".ce", &NewBB->front());
726 PN->replaceAllUsesWith(NewPN);
727 NewPN->addIncoming(PN, OldPred);
728
729 // Loop over all of the incoming value in PN, moving them to NewPN if they
730 // are from the extracted region.
731 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
732 if (Blocks.count(PN->getIncomingBlock(i))) {
733 NewPN->addIncoming(PN->getIncomingValue(i), PN->getIncomingBlock(i));
734 PN->removeIncomingValue(i);
735 --i;
736 }
737 }
738 }
739 }
740 }
741
/// severSplitPHINodesOfExits - If PHI nodes in exit blocks have inputs from the
/// outlined region, split each such PHI in two: one with the inputs from the
/// region and another with the remaining incoming blocks; the first PHIs are
/// then placed inside the outlined region.
void CodeExtractor::severSplitPHINodesOfExits(
    const SmallPtrSetImpl<BasicBlock *> &Exits) {
748 for (BasicBlock *ExitBB : Exits) {
749 BasicBlock *NewBB = nullptr;
750
751 for (PHINode &PN : ExitBB->phis()) {
752 // Find all incoming values from the outlining region.
753 SmallVector<unsigned, 2> IncomingVals;
754 for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i)
755 if (Blocks.count(PN.getIncomingBlock(i)))
756 IncomingVals.push_back(i);
757
      // Do not process the PHI if it has one (or fewer) incoming values from
      // the region. With exactly one such incoming value, only that single
      // incoming block is replaced by the codeRepl block, so it is safe to
      // skip the PHI.
761 if (IncomingVals.size() <= 1)
762 continue;
763
764 // Create block for new PHIs and add it to the list of outlined if it
765 // wasn't done before.
766 if (!NewBB) {
767 NewBB = BasicBlock::Create(ExitBB->getContext(),
768 ExitBB->getName() + ".split",
769 ExitBB->getParent(), ExitBB);
770 SmallVector<BasicBlock *, 4> Preds(predecessors(ExitBB));
771 for (BasicBlock *PredBB : Preds)
772 if (Blocks.count(PredBB))
773 PredBB->getTerminator()->replaceUsesOfWith(ExitBB, NewBB);
774 BranchInst::Create(ExitBB, NewBB);
775 Blocks.insert(NewBB);
776 }
777
778 // Split this PHI.
779 PHINode *NewPN =
780 PHINode::Create(PN.getType(), IncomingVals.size(),
781 PN.getName() + ".ce", NewBB->getFirstNonPHI());
782 for (unsigned i : IncomingVals)
783 NewPN->addIncoming(PN.getIncomingValue(i), PN.getIncomingBlock(i));
784 for (unsigned i : reverse(IncomingVals))
785 PN.removeIncomingValue(i, false);
786 PN.addIncoming(NewPN, NewBB);
787 }
788 }
789 }
790
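/// Split every block in the region that ends in a return, so that the return
/// instruction gets a block of its own; the dominator tree, if available, is
/// updated accordingly.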
void CodeExtractor::splitReturnBlocks() {
792 for (BasicBlock *Block : Blocks)
793 if (ReturnInst *RI = dyn_cast<ReturnInst>(Block->getTerminator())) {
794 BasicBlock *New =
795 Block->splitBasicBlock(RI->getIterator(), Block->getName() + ".ret");
796 if (DT) {
797 // Old dominates New. New node dominates all other nodes dominated
798 // by Old.
799 DomTreeNode *OldNode = DT->getNode(Block);
800 SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
801 OldNode->end());
802
803 DomTreeNode *NewNode = DT->addNewBlock(New, Block);
804
805 for (DomTreeNode *I : Children)
806 DT->changeImmediateDominator(I, NewNode);
807 }
808 }
809 }
810
811 /// constructFunction - make a function based on inputs and outputs, as follows:
812 /// f(in0, ..., inN, out0, ..., outN)
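/// When arguments are aggregated, a single pointer to a struct holding all
/// inputs and outputs is passed instead.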
Function *CodeExtractor::constructFunction(const ValueSet &inputs,
                                           const ValueSet &outputs,
                                           BasicBlock *header,
                                           BasicBlock *newRootNode,
                                           BasicBlock *newHeader,
                                           Function *oldFunction,
                                           Module *M) {
820 LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
821 LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
822
  // The new function returns an unsigned exit-block index; outputs are passed
  // back by reference.
824 switch (NumExitBlocks) {
825 case 0:
826 case 1: RetTy = Type::getVoidTy(header->getContext()); break;
827 case 2: RetTy = Type::getInt1Ty(header->getContext()); break;
828 default: RetTy = Type::getInt16Ty(header->getContext()); break;
829 }
830
831 std::vector<Type *> paramTy;
832
833 // Add the types of the input values to the function's argument list
834 for (Value *value : inputs) {
835 LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
836 paramTy.push_back(value->getType());
837 }
838
839 // Add the types of the output values to the function's argument list.
840 for (Value *output : outputs) {
841 LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
842 if (AggregateArgs)
843 paramTy.push_back(output->getType());
844 else
845 paramTy.push_back(PointerType::getUnqual(output->getType()));
846 }
847
848 LLVM_DEBUG({
849 dbgs() << "Function type: " << *RetTy << " f(";
850 for (Type *i : paramTy)
851 dbgs() << *i << ", ";
852 dbgs() << ")\n";
853 });
854
855 StructType *StructTy = nullptr;
856 if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
857 StructTy = StructType::get(M->getContext(), paramTy);
858 paramTy.clear();
859 paramTy.push_back(PointerType::getUnqual(StructTy));
860 }
861 FunctionType *funcType =
862 FunctionType::get(RetTy, paramTy,
863 AllowVarArgs && oldFunction->isVarArg());
864
865 std::string SuffixToUse =
866 Suffix.empty()
867 ? (header->getName().empty() ? "extracted" : header->getName().str())
868 : Suffix;
869 // Create the new function
870 Function *newFunction = Function::Create(
871 funcType, GlobalValue::InternalLinkage, oldFunction->getAddressSpace(),
872 oldFunction->getName() + "." + SuffixToUse, M);
873 // If the old function is no-throw, so is the new one.
874 if (oldFunction->doesNotThrow())
875 newFunction->setDoesNotThrow();
876
877 // Inherit the uwtable attribute if we need to.
878 if (oldFunction->hasUWTable())
879 newFunction->setHasUWTable();
880
  // Inherit all of the target-dependent attributes and white-listed
  // target-independent attributes. (For example, if the extracted region
  // contains a call to an x86.sse instruction, we need to make sure that the
  // extracted region has the "target-features" attribute allowing it to be
  // lowered.)
  // FIXME: This should be changed to check whether a specific attribute
  // cannot be inherited.
888 for (const auto &Attr : oldFunction->getAttributes().getFnAttributes()) {
889 if (Attr.isStringAttribute()) {
890 if (Attr.getKindAsString() == "thunk")
891 continue;
892 } else
893 switch (Attr.getKindAsEnum()) {
894 // Those attributes cannot be propagated safely. Explicitly list them
895 // here so we get a warning if new attributes are added. This list also
896 // includes non-function attributes.
897 case Attribute::Alignment:
898 case Attribute::AllocSize:
899 case Attribute::ArgMemOnly:
900 case Attribute::Builtin:
901 case Attribute::ByVal:
902 case Attribute::Convergent:
903 case Attribute::Dereferenceable:
904 case Attribute::DereferenceableOrNull:
905 case Attribute::InAlloca:
906 case Attribute::InReg:
907 case Attribute::InaccessibleMemOnly:
908 case Attribute::InaccessibleMemOrArgMemOnly:
909 case Attribute::JumpTable:
910 case Attribute::Naked:
911 case Attribute::Nest:
912 case Attribute::NoAlias:
913 case Attribute::NoBuiltin:
914 case Attribute::NoCapture:
915 case Attribute::NoMerge:
916 case Attribute::NoReturn:
917 case Attribute::NoSync:
918 case Attribute::NoUndef:
919 case Attribute::None:
920 case Attribute::NonNull:
921 case Attribute::Preallocated:
922 case Attribute::ReadNone:
923 case Attribute::ReadOnly:
924 case Attribute::Returned:
925 case Attribute::ReturnsTwice:
926 case Attribute::SExt:
927 case Attribute::Speculatable:
928 case Attribute::StackAlignment:
929 case Attribute::StructRet:
930 case Attribute::SwiftError:
931 case Attribute::SwiftSelf:
932 case Attribute::SwiftAsync:
933 case Attribute::WillReturn:
934 case Attribute::WriteOnly:
935 case Attribute::ZExt:
936 case Attribute::ImmArg:
937 case Attribute::ByRef:
938 case Attribute::EndAttrKinds:
939 case Attribute::EmptyKey:
940 case Attribute::TombstoneKey:
941 continue;
942 // Those attributes should be safe to propagate to the extracted function.
943 case Attribute::AlwaysInline:
944 case Attribute::Cold:
945 case Attribute::Hot:
946 case Attribute::NoRecurse:
947 case Attribute::InlineHint:
948 case Attribute::MinSize:
949 case Attribute::NoCallback:
950 case Attribute::NoDuplicate:
951 case Attribute::NoFree:
952 case Attribute::NoImplicitFloat:
953 case Attribute::NoInline:
954 case Attribute::NonLazyBind:
955 case Attribute::NoRedZone:
956 case Attribute::NoUnwind:
957 case Attribute::NullPointerIsValid:
958 case Attribute::OptForFuzzing:
959 case Attribute::OptimizeNone:
960 case Attribute::OptimizeForSize:
961 case Attribute::SafeStack:
962 case Attribute::ShadowCallStack:
963 case Attribute::SanitizeAddress:
964 case Attribute::SanitizeMemory:
965 case Attribute::SanitizeThread:
966 case Attribute::SanitizeHWAddress:
967 case Attribute::SanitizeMemTag:
968 case Attribute::SpeculativeLoadHardening:
969 case Attribute::StackProtect:
970 case Attribute::StackProtectReq:
971 case Attribute::StackProtectStrong:
972 case Attribute::StrictFP:
973 case Attribute::UWTable:
974 case Attribute::VScaleRange:
975 case Attribute::NoCfCheck:
976 case Attribute::MustProgress:
977 case Attribute::NoProfile:
978 break;
979 }
980
981 newFunction->addFnAttr(Attr);
982 }
983 newFunction->getBasicBlockList().push_back(newRootNode);
984
985 // Create an iterator to name all of the arguments we inserted.
986 Function::arg_iterator AI = newFunction->arg_begin();
987
988 // Rewrite all users of the inputs in the extracted region to use the
989 // arguments (or appropriate addressing into struct) instead.
990 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
991 Value *RewriteVal;
992 if (AggregateArgs) {
993 Value *Idx[2];
994 Idx[0] = Constant::getNullValue(Type::getInt32Ty(header->getContext()));
995 Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), i);
996 Instruction *TI = newFunction->begin()->getTerminator();
997 GetElementPtrInst *GEP = GetElementPtrInst::Create(
998 StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
999 RewriteVal = new LoadInst(StructTy->getElementType(i), GEP,
1000 "loadgep_" + inputs[i]->getName(), TI);
1001 } else
1002 RewriteVal = &*AI++;
1003
1004 std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
1005 for (User *use : Users)
1006 if (Instruction *inst = dyn_cast<Instruction>(use))
1007 if (Blocks.count(inst->getParent()))
1008 inst->replaceUsesOfWith(inputs[i], RewriteVal);
1009 }
1010
1011 // Set names for input and output arguments.
1012 if (!AggregateArgs) {
1013 AI = newFunction->arg_begin();
1014 for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
1015 AI->setName(inputs[i]->getName());
1016 for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
1017 AI->setName(outputs[i]->getName()+".out");
1018 }
1019
  // Rewrite branches to basic blocks outside of the region to new dummy blocks
  // within the new function. This must be done before we lose track of which
  // blocks were originally in the code region.
1023 std::vector<User *> Users(header->user_begin(), header->user_end());
1024 for (auto &U : Users)
1025 // The BasicBlock which contains the branch is not in the region
1026 // modify the branch target to a new block
1027 if (Instruction *I = dyn_cast<Instruction>(U))
1028 if (I->isTerminator() && I->getFunction() == oldFunction &&
1029 !Blocks.count(I->getParent()))
1030 I->replaceUsesOfWith(header, newHeader);
1031
1032 return newFunction;
1033 }
1034
1035 /// Erase lifetime.start markers which reference inputs to the extraction
1036 /// region, and insert the referenced memory into \p LifetimesStart.
1037 ///
1038 /// The extraction region is defined by a set of blocks (\p Blocks), and a set
1039 /// of allocas which will be moved from the caller function into the extracted
1040 /// function (\p SunkAllocas).
static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
                                         const SetVector<Value *> &SunkAllocas,
                                         SetVector<Value *> &LifetimesStart) {
1044 for (BasicBlock *BB : Blocks) {
1045 for (auto It = BB->begin(), End = BB->end(); It != End;) {
1046 auto *II = dyn_cast<IntrinsicInst>(&*It);
1047 ++It;
1048 if (!II || !II->isLifetimeStartOrEnd())
1049 continue;
1050
1051 // Get the memory operand of the lifetime marker. If the underlying
1052 // object is a sunk alloca, or is otherwise defined in the extraction
1053 // region, the lifetime marker must not be erased.
1054 Value *Mem = II->getOperand(1)->stripInBoundsOffsets();
1055 if (SunkAllocas.count(Mem) || definedInRegion(Blocks, Mem))
1056 continue;
1057
1058 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1059 LifetimesStart.insert(Mem);
1060 II->eraseFromParent();
1061 }
1062 }
1063 }
1064
1065 /// Insert lifetime start/end markers surrounding the call to the new function
1066 /// for objects defined in the caller.
static void insertLifetimeMarkersSurroundingCall(
    Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
    CallInst *TheCall) {
1070 LLVMContext &Ctx = M->getContext();
1071 auto Int8PtrTy = Type::getInt8PtrTy(Ctx);
1072 auto NegativeOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
1073 Instruction *Term = TheCall->getParent()->getTerminator();
1074
  // The memory argument to a lifetime marker must be an i8*. Cache any
  // bitcasts needed to satisfy this requirement so they may be reused.
1077 DenseMap<Value *, Value *> Bitcasts;
1078
1079 // Emit lifetime markers for the pointers given in \p Objects. Insert the
1080 // markers before the call if \p InsertBefore, and after the call otherwise.
1081 auto insertMarkers = [&](Function *MarkerFunc, ArrayRef<Value *> Objects,
1082 bool InsertBefore) {
1083 for (Value *Mem : Objects) {
1084 assert((!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() ==
1085 TheCall->getFunction()) &&
1086 "Input memory not defined in original function");
1087 Value *&MemAsI8Ptr = Bitcasts[Mem];
1088 if (!MemAsI8Ptr) {
1089 if (Mem->getType() == Int8PtrTy)
1090 MemAsI8Ptr = Mem;
1091 else
1092 MemAsI8Ptr =
1093 CastInst::CreatePointerCast(Mem, Int8PtrTy, "lt.cast", TheCall);
1094 }
1095
1096 auto Marker = CallInst::Create(MarkerFunc, {NegativeOne, MemAsI8Ptr});
1097 if (InsertBefore)
1098 Marker->insertBefore(TheCall);
1099 else
1100 Marker->insertBefore(Term);
1101 }
1102 };
1103
1104 if (!LifetimesStart.empty()) {
1105 auto StartFn = llvm::Intrinsic::getDeclaration(
1106 M, llvm::Intrinsic::lifetime_start, Int8PtrTy);
1107 insertMarkers(StartFn, LifetimesStart, /*InsertBefore=*/true);
1108 }
1109
1110 if (!LifetimesEnd.empty()) {
1111 auto EndFn = llvm::Intrinsic::getDeclaration(
1112 M, llvm::Intrinsic::lifetime_end, Int8PtrTy);
1113 insertMarkers(EndFn, LifetimesEnd, /*InsertBefore=*/false);
1114 }
1115 }
1116
1117 /// emitCallAndSwitchStatement - This method sets up the caller side by adding
1118 /// the call instruction, splitting any PHI nodes in the header block as
1119 /// necessary.
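/// For a region with two exits, the caller-side code produced here looks
/// roughly like the following (illustrative IR only; the value names are
/// invented for this sketch):
///
///   %targetBlock = call i1 @foo.extracted(...)
///   br i1 %targetBlock, label %first.exit, label %second.exit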
CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
                                                    BasicBlock *codeReplacer,
                                                    ValueSet &inputs,
                                                    ValueSet &outputs) {
  // Emit a call to the new function, passing in: a pointer to the argument
  // struct (if aggregating parameters), or the plain inputs and allocated
  // memory for the outputs.
1126 std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;
1127
1128 Module *M = newFunction->getParent();
1129 LLVMContext &Context = M->getContext();
1130 const DataLayout &DL = M->getDataLayout();
1131 CallInst *call = nullptr;
1132
1133 // Add inputs as params, or to be filled into the struct
1134 unsigned ArgNo = 0;
1135 SmallVector<unsigned, 1> SwiftErrorArgs;
1136 for (Value *input : inputs) {
1137 if (AggregateArgs)
1138 StructValues.push_back(input);
1139 else {
1140 params.push_back(input);
1141 if (input->isSwiftError())
1142 SwiftErrorArgs.push_back(ArgNo);
1143 }
1144 ++ArgNo;
1145 }
1146
1147 // Create allocas for the outputs
1148 for (Value *output : outputs) {
1149 if (AggregateArgs) {
1150 StructValues.push_back(output);
1151 } else {
1152 AllocaInst *alloca =
1153 new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
1154 nullptr, output->getName() + ".loc",
1155 &codeReplacer->getParent()->front().front());
1156 ReloadOutputs.push_back(alloca);
1157 params.push_back(alloca);
1158 }
1159 }
1160
1161 StructType *StructArgTy = nullptr;
1162 AllocaInst *Struct = nullptr;
1163 if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
1164 std::vector<Type *> ArgTypes;
1165 for (Value *V : StructValues)
1166 ArgTypes.push_back(V->getType());
1167
1168 // Allocate a struct at the beginning of this function
1169 StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
1170 Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
1171 "structArg",
1172 &codeReplacer->getParent()->front().front());
1173 params.push_back(Struct);
1174
1175 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
1176 Value *Idx[2];
1177 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1178 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
1179 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1180 StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
1181 codeReplacer->getInstList().push_back(GEP);
1182 new StoreInst(StructValues[i], GEP, codeReplacer);
1183 }
1184 }
1185
1186 // Emit the call to the function
1187 call = CallInst::Create(newFunction, params,
1188 NumExitBlocks > 1 ? "targetBlock" : "");
1189 // Add debug location to the new call, if the original function has debug
1190 // info. In that case, the terminator of the entry block of the extracted
1191 // function contains the first debug location of the extracted function,
1192 // set in extractCodeRegion.
1193 if (codeReplacer->getParent()->getSubprogram()) {
1194 if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
1195 call->setDebugLoc(DL);
1196 }
1197 codeReplacer->getInstList().push_back(call);
1198
1199 // Set swifterror parameter attributes.
1200 for (unsigned SwiftErrArgNo : SwiftErrorArgs) {
1201 call->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
1202 newFunction->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
1203 }
1204
1205 Function::arg_iterator OutputArgBegin = newFunction->arg_begin();
1206 unsigned FirstOut = inputs.size();
1207 if (!AggregateArgs)
1208 std::advance(OutputArgBegin, inputs.size());
1209
1210 // Reload the outputs passed in by reference.
1211 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
1212 Value *Output = nullptr;
1213 if (AggregateArgs) {
1214 Value *Idx[2];
1215 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1216 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
1217 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1218 StructArgTy, Struct, Idx, "gep_reload_" + outputs[i]->getName());
1219 codeReplacer->getInstList().push_back(GEP);
1220 Output = GEP;
1221 } else {
1222 Output = ReloadOutputs[i];
1223 }
1224 LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
1225 outputs[i]->getName() + ".reload",
1226 codeReplacer);
1227 Reloads.push_back(load);
1228 std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
1229 for (unsigned u = 0, e = Users.size(); u != e; ++u) {
1230 Instruction *inst = cast<Instruction>(Users[u]);
1231 if (!Blocks.count(inst->getParent()))
1232 inst->replaceUsesOfWith(outputs[i], load);
1233 }
1234 }
1235
1236 // Now we can emit a switch statement using the call as a value.
1237 SwitchInst *TheSwitch =
1238 SwitchInst::Create(Constant::getNullValue(Type::getInt16Ty(Context)),
1239 codeReplacer, 0, codeReplacer);
1240
1241 // Since there may be multiple exits from the original region, make the new
1242 // function return an unsigned, switch on that number. This loop iterates
1243 // over all of the blocks in the extracted region, updating any terminator
1244 // instructions in the to-be-extracted region that branch to blocks that are
1245 // not in the region to be extracted.
1246 std::map<BasicBlock *, BasicBlock *> ExitBlockMap;
1247
1248 unsigned switchVal = 0;
1249 for (BasicBlock *Block : Blocks) {
1250 Instruction *TI = Block->getTerminator();
1251 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
1252 if (!Blocks.count(TI->getSuccessor(i))) {
1253 BasicBlock *OldTarget = TI->getSuccessor(i);
1254 // add a new basic block which returns the appropriate value
1255 BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
1256 if (!NewTarget) {
1257 // If we don't already have an exit stub for this non-extracted
1258 // destination, create one now!
1259 NewTarget = BasicBlock::Create(Context,
1260 OldTarget->getName() + ".exitStub",
1261 newFunction);
1262 unsigned SuccNum = switchVal++;
1263
1264 Value *brVal = nullptr;
1265 switch (NumExitBlocks) {
1266 case 0:
1267 case 1: break; // No value needed.
1268 case 2: // Conditional branch, return a bool
1269 brVal = ConstantInt::get(Type::getInt1Ty(Context), !SuccNum);
1270 break;
1271 default:
1272 brVal = ConstantInt::get(Type::getInt16Ty(Context), SuccNum);
1273 break;
1274 }
1275
1276 ReturnInst::Create(Context, brVal, NewTarget);
1277
1278 // Update the switch instruction.
1279 TheSwitch->addCase(ConstantInt::get(Type::getInt16Ty(Context),
1280 SuccNum),
1281 OldTarget);
1282 }
1283
1284 // rewrite the original branch instruction with this new target
1285 TI->setSuccessor(i, NewTarget);
1286 }
1287 }
1288
  // Store the output arguments right after the definition of each output
  // value. This must be done after creating the exit stubs to ensure that the
  // store of an invoke's result is placed in the outlined function.
1292 Function::arg_iterator OAI = OutputArgBegin;
1293 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
1294 auto *OutI = dyn_cast<Instruction>(outputs[i]);
1295 if (!OutI)
1296 continue;
1297
1298 // Find proper insertion point.
1299 BasicBlock::iterator InsertPt;
1300 // In case OutI is an invoke, we insert the store at the beginning in the
1301 // 'normal destination' BB. Otherwise we insert the store right after OutI.
1302 if (auto *InvokeI = dyn_cast<InvokeInst>(OutI))
1303 InsertPt = InvokeI->getNormalDest()->getFirstInsertionPt();
1304 else if (auto *Phi = dyn_cast<PHINode>(OutI))
1305 InsertPt = Phi->getParent()->getFirstInsertionPt();
1306 else
1307 InsertPt = std::next(OutI->getIterator());
1308
1309 Instruction *InsertBefore = &*InsertPt;
1310 assert((InsertBefore->getFunction() == newFunction ||
1311 Blocks.count(InsertBefore->getParent())) &&
1312 "InsertPt should be in new function");
1313 assert(OAI != newFunction->arg_end() &&
1314 "Number of output arguments should match "
1315 "the amount of defined values");
1316 if (AggregateArgs) {
1317 Value *Idx[2];
1318 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1319 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
1320 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1321 StructArgTy, &*OAI, Idx, "gep_" + outputs[i]->getName(),
1322 InsertBefore);
1323 new StoreInst(outputs[i], GEP, InsertBefore);
1324 // Since there should be only one struct argument aggregating
1325 // all the output values, we shouldn't increment OAI, which always
1326 // points to the struct argument, in this case.
1327 } else {
1328 new StoreInst(outputs[i], &*OAI, InsertBefore);
1329 ++OAI;
1330 }
1331 }
1332
1333 // Now that we've done the deed, simplify the switch instruction.
1334 Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
1335 switch (NumExitBlocks) {
1336 case 0:
    // The block containing the switch has no successors, which means that
    // previously this was the last part of the function, and hence this should
    // be rewritten as a `ret'.
1340
1341 // Check if the function should return a value
1342 if (OldFnRetTy->isVoidTy()) {
1343 ReturnInst::Create(Context, nullptr, TheSwitch); // Return void
1344 } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
1345 // return what we have
1346 ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
1347 } else {
1348 // Otherwise we must have code extracted an unwind or something, just
1349 // return whatever we want.
1350 ReturnInst::Create(Context,
1351 Constant::getNullValue(OldFnRetTy), TheSwitch);
1352 }
1353
1354 TheSwitch->eraseFromParent();
1355 break;
1356 case 1:
1357 // Only a single destination, change the switch into an unconditional
1358 // branch.
1359 BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch);
1360 TheSwitch->eraseFromParent();
1361 break;
1362 case 2:
1363 BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch->getSuccessor(2),
1364 call, TheSwitch);
1365 TheSwitch->eraseFromParent();
1366 break;
1367 default:
1368 // Otherwise, make the default destination of the switch instruction be one
1369 // of the other successors.
1370 TheSwitch->setCondition(call);
1371 TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
1372 // Remove redundant case
1373 TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
1374 break;
1375 }
1376
  // Insert lifetime markers around the reloads of any output values. The
  // allocas that the output values are stored in are only in use in the
  // codeRepl block.
1379 insertLifetimeMarkersSurroundingCall(M, ReloadOutputs, ReloadOutputs, call);
1380
1381 return call;
1382 }
1383
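/// Move each block of the region out of the original function's block list and
/// append it to the new function.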
void CodeExtractor::moveCodeToFunction(Function *newFunction) {
1385 Function *oldFunc = (*Blocks.begin())->getParent();
1386 Function::BasicBlockListType &oldBlocks = oldFunc->getBasicBlockList();
1387 Function::BasicBlockListType &newBlocks = newFunction->getBasicBlockList();
1388
1389 for (BasicBlock *Block : Blocks) {
1390 // Delete the basic block from the old function, and the list of blocks
1391 oldBlocks.remove(Block);
1392
1393 // Insert this basic block into the new function
1394 newBlocks.push_back(Block);
1395 }
1396 }
1397
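/// Recompute branch probabilities and !prof branch weights for the terminator
/// of the codeRepl block, based on the block frequencies of the original exit
/// blocks.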
void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
1402 using Distribution = BlockFrequencyInfoImplBase::Distribution;
1403 using BlockNode = BlockFrequencyInfoImplBase::BlockNode;
1404
1405 // Update the branch weights for the exit block.
1406 Instruction *TI = CodeReplacer->getTerminator();
1407 SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);
1408
1409 // Block Frequency distribution with dummy node.
1410 Distribution BranchDist;
1411
1412 SmallVector<BranchProbability, 4> EdgeProbabilities(
1413 TI->getNumSuccessors(), BranchProbability::getUnknown());
1414
1415 // Add each of the frequencies of the successors.
1416 for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
1417 BlockNode ExitNode(i);
1418 uint64_t ExitFreq = ExitWeights[TI->getSuccessor(i)].getFrequency();
1419 if (ExitFreq != 0)
1420 BranchDist.addExit(ExitNode, ExitFreq);
1421 else
1422 EdgeProbabilities[i] = BranchProbability::getZero();
1423 }
1424
1425 // Check for no total weight.
1426 if (BranchDist.Total == 0) {
1427 BPI->setEdgeProbability(CodeReplacer, EdgeProbabilities);
1428 return;
1429 }
1430
1431 // Normalize the distribution so that they can fit in unsigned.
1432 BranchDist.normalize();
1433
1434 // Create normalized branch weights and set the metadata.
1435 for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
1436 const auto &Weight = BranchDist.Weights[I];
1437
1438 // Get the weight and update the current BFI.
1439 BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
1440 BranchProbability BP(Weight.Amount, BranchDist.Total);
1441 EdgeProbabilities[Weight.TargetNode.Index] = BP;
1442 }
1443 BPI->setEdgeProbability(CodeReplacer, EdgeProbabilities);
1444 TI->setMetadata(
1445 LLVMContext::MD_prof,
1446 MDBuilder(TI->getContext()).createBranchWeights(BranchWeights));
1447 }
1448
1449 /// Erase debug info intrinsics which refer to values in \p F but aren't in
1450 /// \p F.
static void eraseDebugIntrinsicsWithNonLocalRefs(Function &F) {
1452 for (Instruction &I : instructions(F)) {
1453 SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
1454 findDbgUsers(DbgUsers, &I);
1455 for (DbgVariableIntrinsic *DVI : DbgUsers)
1456 if (DVI->getFunction() != &F)
1457 DVI->eraseFromParent();
1458 }
1459 }
1460
1461 /// Fix up the debug info in the old and new functions by pointing line
1462 /// locations and debug intrinsics to the new subprogram scope, and by deleting
1463 /// intrinsics which point to values outside of the new function.
static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
                                         CallInst &TheCall) {
1466 DISubprogram *OldSP = OldFunc.getSubprogram();
1467 LLVMContext &Ctx = OldFunc.getContext();
1468
1469 if (!OldSP) {
1470 // Erase any debug info the new function contains.
1471 stripDebugInfo(NewFunc);
1472 // Make sure the old function doesn't contain any non-local metadata refs.
1473 eraseDebugIntrinsicsWithNonLocalRefs(NewFunc);
1474 return;
1475 }
1476
  // Create a subprogram for the new function. Leave out a description of the
  // function arguments, as the parameters don't correspond to anything at the
  // source level.
  assert(OldSP->getUnit() && "Missing compile unit for subprogram");
  DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
                OldSP->getUnit());
  auto SPType = DIB.createSubroutineType(DIB.getOrCreateTypeArray(None));
  DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
                                    DISubprogram::SPFlagOptimized |
                                    DISubprogram::SPFlagLocalToUnit;
  auto NewSP = DIB.createFunction(
      OldSP->getUnit(), NewFunc.getName(), NewFunc.getName(), OldSP->getFile(),
      /*LineNo=*/0, SPType, /*ScopeLine=*/0, DINode::FlagZero, SPFlags);
  NewFunc.setSubprogram(NewSP);

  // Debug intrinsics in the new function need to be updated in one of two
  // ways:
  //  1) They need to be deleted, because they describe a value in the old
  //     function.
  //  2) They need to point to fresh metadata, e.g. because they currently
  //     point to a variable in the wrong scope.
  SmallDenseMap<DINode *, DINode *> RemappedMetadata;
  SmallVector<Instruction *, 4> DebugIntrinsicsToDelete;
  for (Instruction &I : instructions(NewFunc)) {
    auto *DII = dyn_cast<DbgInfoIntrinsic>(&I);
    if (!DII)
      continue;

    // Point the intrinsic to a fresh label within the new function.
    if (auto *DLI = dyn_cast<DbgLabelInst>(&I)) {
      DILabel *OldLabel = DLI->getLabel();
      DINode *&NewLabel = RemappedMetadata[OldLabel];
      if (!NewLabel)
        NewLabel = DILabel::get(Ctx, NewSP, OldLabel->getName(),
                                OldLabel->getFile(), OldLabel->getLine());
      DLI->setArgOperand(0, MetadataAsValue::get(Ctx, NewLabel));
      continue;
    }

    auto IsInvalidLocation = [&NewFunc](Value *Location) {
      // Location is invalid if it isn't a constant or an instruction, or is an
      // instruction but isn't in the new function.
      if (!Location ||
          (!isa<Constant>(Location) && !isa<Instruction>(Location)))
        return true;
      Instruction *LocationInst = dyn_cast<Instruction>(Location);
      return LocationInst && LocationInst->getFunction() != &NewFunc;
    };

    auto *DVI = cast<DbgVariableIntrinsic>(DII);
    // If any of the used locations are invalid, delete the intrinsic.
    if (any_of(DVI->location_ops(), IsInvalidLocation)) {
      DebugIntrinsicsToDelete.push_back(DVI);
      continue;
    }

    // Point the intrinsic to a fresh variable within the new function.
    DILocalVariable *OldVar = DVI->getVariable();
    DINode *&NewVar = RemappedMetadata[OldVar];
    if (!NewVar)
      NewVar = DIB.createAutoVariable(
          NewSP, OldVar->getName(), OldVar->getFile(), OldVar->getLine(),
          OldVar->getType(), /*AlwaysPreserve=*/false, DINode::FlagZero,
          OldVar->getAlignInBits());
    DVI->setVariable(cast<DILocalVariable>(NewVar));
  }
  for (auto *DII : DebugIntrinsicsToDelete)
    DII->eraseFromParent();
  DIB.finalizeSubprogram(NewSP);

  // Fix up the scope information attached to the line locations in the new
  // function.
  for (Instruction &I : instructions(NewFunc)) {
    if (const DebugLoc &DL = I.getDebugLoc())
      I.setDebugLoc(DILocation::get(Ctx, DL.getLine(), DL.getCol(), NewSP));

    // Loop info metadata may contain line locations. Fix them up.
    auto updateLoopInfoLoc = [&Ctx,
                              NewSP](const DILocation &Loc) -> DILocation * {
      return DILocation::get(Ctx, Loc.getLine(), Loc.getColumn(), NewSP,
                             nullptr);
    };
    updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
  }
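  // If the call site has no location of its own, give it an artificial
  // line-zero location in the old subprogram's scope, so the call still
  // carries a valid !dbg attachment in a function that has debug info.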
  if (!TheCall.getDebugLoc())
    TheCall.setDebugLoc(DILocation::get(Ctx, 0, 0, OldSP));

  eraseDebugIntrinsicsWithNonLocalRefs(NewFunc);
}

Function *
CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC) {
  if (!isEligible())
    return nullptr;

  // Assumption: this is a single-entry code region, and the header is the first
  // block in the region.
  BasicBlock *header = *Blocks.begin();
  Function *oldFunction = header->getParent();

  // Calculate the entry frequency of the new function before we change the root
  // block.
  BlockFrequency EntryFreq;
  if (BFI) {
    assert(BPI && "Both BPI and BFI are required to preserve profile info");
    for (BasicBlock *Pred : predecessors(header)) {
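      // Only edges entering the region from outside contribute to the entry
      // frequency of the extracted function.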
      if (Blocks.count(Pred))
        continue;
      EntryFreq +=
          BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, header);
    }
  }

  // Remove @llvm.assume calls that will be moved to the new function from the
  // old function's assumption cache.
  for (BasicBlock *Block : Blocks) {
    for (auto It = Block->begin(), End = Block->end(); It != End;) {
      Instruction *I = &*It;
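      // Advance the iterator first; the instruction may be erased below.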
      ++It;

      if (auto *AI = dyn_cast<AssumeInst>(I)) {
        if (AC)
          AC->unregisterAssumption(AI);
        AI->eraseFromParent();
      }
    }
  }

  // If we have any return instructions in the region, split those blocks so
  // that the return is not in the region.
  splitReturnBlocks();

  // Calculate the exit blocks for the extracted region and the total exit
  // weights for each of those blocks.
  DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
  SmallPtrSet<BasicBlock *, 1> ExitBlocks;
  for (BasicBlock *Block : Blocks) {
    for (BasicBlock *Succ : successors(Block)) {
      if (!Blocks.count(Succ)) {
        // Update the branch weight for this successor.
        if (BFI) {
          BlockFrequency &BF = ExitWeights[Succ];
          BF += BFI->getBlockFreq(Block) * BPI->getEdgeProbability(Block, Succ);
        }
        ExitBlocks.insert(Succ);
      }
    }
  }
  NumExitBlocks = ExitBlocks.size();

  // If we have to split PHI nodes of the entry or exit blocks, do so now.
  severSplitPHINodesOfEntry(header);
  severSplitPHINodesOfExits(ExitBlocks);

  // This block takes the place of the original code region.
  BasicBlock *codeReplacer = BasicBlock::Create(header->getContext(),
                                                "codeRepl", oldFunction,
                                                header);

  // The new function needs a root node because other nodes can branch to the
  // head of the region, but the entry node of a function cannot have
  // predecessors.
  BasicBlock *newFuncRoot = BasicBlock::Create(header->getContext(),
                                               "newFuncRoot");
  auto *BranchI = BranchInst::Create(header);
  // If the original function has debug info, we have to add a debug location
  // to the new branch instruction from the artificial entry block.
  // We use the debug location of the first instruction in the extracted
  // blocks, as there is no other equivalent line in the source code.
  if (oldFunction->getSubprogram()) {
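    // any_of is used only to locate the first instruction that has a debug
    // location and to stop iterating once it is found; the boolean result is
    // intentionally ignored.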
    any_of(Blocks, [&BranchI](const BasicBlock *BB) {
      return any_of(*BB, [&BranchI](const Instruction &I) {
        if (!I.getDebugLoc())
          return false;
        BranchI->setDebugLoc(I.getDebugLoc());
        return true;
      });
    });
  }
  newFuncRoot->getInstList().push_back(BranchI);

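  // Figure out which allocas can be sunk into the new function (their uses are
  // confined to the region) and which instructions need to be hoisted out to a
  // block associated with the region's common exit.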
  ValueSet inputs, outputs, SinkingCands, HoistingCands;
  BasicBlock *CommonExit = nullptr;
  findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
  assert(HoistingCands.empty() || CommonExit);

  // Find inputs to, outputs from the code region.
  findInputsOutputs(inputs, outputs, SinkingCands);

  // Now sink all instructions which only have non-phi uses inside the region.
  // Group the allocas at the start of the block, so that any bitcast uses of
  // the allocas are well-defined.
  AllocaInst *FirstSunkAlloca = nullptr;
  for (auto *II : SinkingCands) {
    if (auto *AI = dyn_cast<AllocaInst>(II)) {
      AI->moveBefore(*newFuncRoot, newFuncRoot->getFirstInsertionPt());
      if (!FirstSunkAlloca)
        FirstSunkAlloca = AI;
    }
  }
  assert((SinkingCands.empty() || FirstSunkAlloca) &&
         "Did not expect a sink candidate without any allocas");
  for (auto *II : SinkingCands) {
    if (!isa<AllocaInst>(II)) {
      cast<Instruction>(II)->moveAfter(FirstSunkAlloca);
    }
  }

  if (!HoistingCands.empty()) {
    auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExit);
    Instruction *TI = HoistToBlock->getTerminator();
    for (auto *II : HoistingCands)
      cast<Instruction>(II)->moveBefore(TI);
  }

  // Collect objects which are inputs to the extraction region and also
  // referenced by lifetime start markers within it. The effects of these
  // markers must be replicated in the calling function to prevent the stack
  // coloring pass from merging slots which store input objects.
  ValueSet LifetimesStart;
  eraseLifetimeMarkersOnInputs(Blocks, SinkingCands, LifetimesStart);

  // Construct new function based on inputs/outputs & add allocas for all defs.
  Function *newFunction =
      constructFunction(inputs, outputs, header, newFuncRoot, codeReplacer,
                        oldFunction, oldFunction->getParent());

  // Update the entry count of the function.
  if (BFI) {
    auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
    if (Count.hasValue())
      newFunction->setEntryCount(
          ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME
    BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
  }

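  // Emit the call to the new function inside codeReplacer; when the region has
  // more than one exit block, the call's return value selects which exit block
  // to branch to.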
  CallInst *TheCall =
      emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);

  moveCodeToFunction(newFunction);

  // Replicate the effects of any lifetime start/end markers which referenced
  // input objects in the extraction region by placing markers around the call.
  insertLifetimeMarkersSurroundingCall(
      oldFunction->getParent(), LifetimesStart.getArrayRef(), {}, TheCall);

  // Propagate personality info to the new function if there is one.
  if (oldFunction->hasPersonalityFn())
    newFunction->setPersonalityFn(oldFunction->getPersonalityFn());

  // Update the branch weights for the exit block.
  if (BFI && NumExitBlocks > 1)
    calculateNewCallTerminatorWeights(codeReplacer, ExitWeights, BPI);

  // Loop over all of the PHI nodes in the header and exit blocks, and change
  // any references to the old incoming edge to be the new incoming edge.
  for (BasicBlock::iterator I = header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!Blocks.count(PN->getIncomingBlock(i)))
        PN->setIncomingBlock(i, newFuncRoot);
  }

  for (BasicBlock *ExitBB : ExitBlocks)
    for (PHINode &PN : ExitBB->phis()) {
      Value *IncomingCodeReplacerVal = nullptr;
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
        // Ignore incoming values from outside of the extracted region.
        if (!Blocks.count(PN.getIncomingBlock(i)))
          continue;

        // Ensure that there is only one incoming value from codeReplacer.
        if (!IncomingCodeReplacerVal) {
          PN.setIncomingBlock(i, codeReplacer);
          IncomingCodeReplacerVal = PN.getIncomingValue(i);
        } else
          assert(IncomingCodeReplacerVal == PN.getIncomingValue(i) &&
                 "PHI has two incompatible incoming values from codeRepl");
      }
    }

  fixupDebugInfoPostExtraction(*oldFunction, *newFunction, *TheCall);

  // Mark the new function `noreturn` if applicable. Terminators which resume
  // exception propagation are treated as returning instructions. This is to
  // avoid inserting traps after calls to outlined functions which unwind.
  bool doesNotReturn = none_of(*newFunction, [](const BasicBlock &BB) {
    const Instruction *Term = BB.getTerminator();
    return isa<ReturnInst>(Term) || isa<ResumeInst>(Term);
  });
  if (doesNotReturn)
    newFunction->setDoesNotReturn();

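  // In debug builds, verify that the old and new functions are well-formed and
  // that the assumption cache contains no stale entries.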
  LLVM_DEBUG(if (verifyFunction(*newFunction, &errs())) {
    newFunction->dump();
    report_fatal_error("verification of newFunction failed!");
  });
  LLVM_DEBUG(if (verifyFunction(*oldFunction))
                 report_fatal_error("verification of oldFunction failed!"));
  LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC))
                 report_fatal_error("Stale Assumption cache for old Function!"));
  return newFunction;
}

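/// Return true if \p AC, the assumption cache for \p OldFunc, contains stale
/// entries: llvm.assume calls or affected values that no longer live in
/// \p OldFunc (for example because they were moved into \p NewFunc).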
bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
                                          const Function &NewFunc,
                                          AssumptionCache *AC) {
  for (auto AssumeVH : AC->assumptions()) {
    auto *I = dyn_cast_or_null<CallInst>(AssumeVH);
    if (!I)
      continue;

    // There shouldn't be any llvm.assume intrinsics in the new function.
    if (I->getFunction() != &OldFunc)
      return true;

    // There shouldn't be any stale affected values in the assumption cache
    // that were previously in the old function, but that have now been moved
    // to the new function.
    for (auto AffectedValVH : AC->assumptionsFor(I->getOperand(0))) {
      auto *AffectedCI = dyn_cast_or_null<CallInst>(AffectedValVH);
      if (!AffectedCI)
        continue;
      if (AffectedCI->getFunction() != &OldFunc)
        return true;
      auto *AssumedInst = cast<Instruction>(AffectedCI->getOperand(0));
      if (AssumedInst->getFunction() != &OldFunc)
        return true;
    }
  }
  return false;
}
