//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any throwing calls, and if
/// so, it rewrites them to be invokes that jump to UnwindEdge, returning the
/// modified block so the caller can add the new incoming values to any PHI
/// nodes in the unwind destination.
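///
/// For illustration only (IR shapes approximate), a throwing call such as
///
///   %r = call i32 @may_throw(i32 %x)
///
/// is rewritten into
///
///   %r = invoke i32 @may_throw(i32 %x)
///           to label %r.noexc unwind label %UnwindEdge
///
/// where %r.noexc is the split-off remainder of the original block.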
static BasicBlock *
HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
       ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // Forward EH terminator instructions to the caller's invoke destination.
  // This is as simple as connecting all the instructions which 'unwind to
  // caller' to the invoke destination.
  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
       ++BB) {
    Instruction *I = BB->getFirstNonPHI();
    if (I->isEHPad()) {
      if (auto *CEPI = dyn_cast<CatchEndPadInst>(I)) {
        if (CEPI->unwindsToCaller()) {
          CatchEndPadInst::Create(CEPI->getContext(), UnwindDest, CEPI);
          CEPI->eraseFromParent();
          UpdatePHINodes(BB);
        }
      } else if (auto *TPI = dyn_cast<TerminatePadInst>(I)) {
        if (TPI->unwindsToCaller()) {
          SmallVector<Value *, 3> TerminatePadArgs;
          for (Value *Operand : TPI->operands())
            TerminatePadArgs.push_back(Operand);
          TerminatePadInst::Create(TPI->getContext(), UnwindDest,
                                   TerminatePadArgs, TPI);
          TPI->eraseFromParent();
          UpdatePHINodes(BB);
        }
      } else {
        assert(isa<CatchPadInst>(I) || isa<CleanupPadInst>(I));
      }
    }

    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        CleanupReturnInst::Create(CRI->getContext(), CRI->getReturnValue(),
                                  UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(BB);
      }
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB)
      if (BasicBlock *NewBB =
              HandleCallsInBlockInlinedThroughInvoke(BB, UnwindDest))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
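///
/// As a sketch (metadata numbering illustrative): if the callee's loads carry
/// '!alias.scope !0', then the first inlined copy should end up with a fresh
/// clone such as '!alias.scope !10' and a second inlined copy with another
/// clone '!alias.scope !20', so the two bodies never appear to share a scope.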
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could restrict this to cloning only metadata that is already
  // used in the caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE;
         ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
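///
/// As an illustrative sketch: for a callee 'void @f(i8* noalias %p, i8* %q)',
/// a fresh scope, say !s, is created for %p. Inlined accesses derived from %p
/// are tagged with '!alias.scope !{!s}', and inlined accesses known not to be
/// derived from %p are tagged with '!noalias !{!s}'.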
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here, regardless of the
    // linkage of the callee, because the aliasing "scope" is not just a
    // property of the callee but also of all control dependencies in the
    // caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          FunctionModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but that it
      // can only be seen through several PHIs, and thus could appear several
      // times in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
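///
/// For example (shape only): inlining 'void @f(i32* align 16 %p)' can emit an
/// @llvm.assume in the caller asserting that the actual argument passed for
/// %p is 16-byte aligned, so later passes retain the alignment fact that the
/// attribute carried.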
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CS.getCaller()),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
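///
/// Illustrative IR (names hypothetical): for a byval argument %src of type
/// %struct.T, inlining introduces
///
///   %src.copy = alloca %struct.T
///   ; ...memcpy of sizeof(%struct.T) bytes from %src to %src.copy...
///
/// in the caller's entry block, and the inlined body then uses %src.copy in
/// place of %src.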
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than
    // the known alignment of the passed-in pointer, then we either have to
    // round up the input pointer or give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca, using the DataLayout's preferred alignment for the
  // aggregate type.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
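// For illustration, this catches markers attached either to an i8* alloca
// directly or through a pointer cast, e.g. (schematic IR):
//   %c = bitcast i32* %ai to i8*
//   call void @llvm.lifetime.start(i64 4, i8* %c)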
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
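///
/// For example (locations schematic): if this instruction's chain is
/// "L1 inlined at L2" and the call being inlined sits at location C, the
/// rebuilt chain is "L1 inlined at (L2 inlined at C)".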
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
                                         InlinedAtLocations.rend())) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other instruction's.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(
            updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
      }
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn() ? CalledFunc->getPersonalityFn() : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn() ? Caller->getPersonalityFn() : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of (dst, src) pairs to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.  For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'.  If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail.  Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
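  // Illustrative result (size from the DataLayout, cast shown schematically):
  // for a static 'alloca i32' this emits
  //   call void @llvm.lifetime.start(i64 4, i8* %cast)
  // at the top of the inlined code and a matching @llvm.lifetime.end before
  // each return.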
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function, then there's no need to add redundant, less accurate
      // markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when multiplied by the type size.
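        // For example, an array size of 1ULL << 33 with a type size of
        // 1ULL << 32 would wrap around uint64_t; the division below rejects
        // it because UINT64_MAX / (1ULL << 33) < (1ULL << 32).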
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
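  // The net effect is roughly (illustrative IR):
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined code, including its dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)
  // so stack space used by the inlined dynamic allocas is reclaimed on exit.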
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return.  The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
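  // In other words, a 'call' cloned from the callee becomes an 'invoke' whose
  // unwind edge targets the original invoke's unwind destination, so an
  // exception thrown by the inlined code still reaches the caller's handler.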
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Handle any inlined musttail call sites.  In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail.  Therefore it's safe to return without merging control into the
  // phi below.
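  // For example (illustrative IR), if the caller returns i8* but the inlined
  // call site produced i32*, a cloned
  //   %v = musttail call i32* @f()
  //   ret i32* %v
  // is rewritten below to
  //   %v = musttail call i32* @f()
  //   %c = bitcast i32* %v to i8*
  //   ret i8* %c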
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
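  // For example, a callee consisting of the single block "ret i32 %x" is
  // inlined by splicing that block's instructions in front of the call, with
  // no new basic blocks or PHI nodes required.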
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
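  // Once the blocks are rewired below, the CFG looks roughly like:
  //   OrigBB:       <code before the call site>; br <inlined entry block>
  //   ...inlined blocks, spliced in after OrigBB...
  //   AfterCallBB:  <code that followed the call site>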
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
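  // For an unconditional branch, operand 0 is the sole successor, so this
  // retargets OrigBB's fall-through edge from AfterCallBB to the inlined
  // entry block.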
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
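    // For example, with two cloned returns the merge looks roughly like
    //   AfterCallBB:
    //     %phi = phi i32 [ %v1, %ret1 ], [ %v2, %ret2 ]
    // where %v1/%v2 are the cloned return values and %ret1/%ret2 are the
    // blocks that returned them (names are illustrative).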
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code from the callee's entry block into the calling block,
  // right before the unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
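  // For example, if every cloned return produced the same value, the PHI
  // simplifies to that value and can be removed entirely.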
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}