1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringExtras.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/CallGraph.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/InstructionSimplify.h"
27 #include "llvm/Analysis/MemoryProfileInfo.h"
28 #include "llvm/Analysis/ObjCARCAnalysisUtils.h"
29 #include "llvm/Analysis/ObjCARCUtil.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/IR/AttributeMask.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugInfo.h"
41 #include "llvm/IR/DebugInfoMetadata.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/EHPersonalities.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InlineAsm.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/MDBuilder.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Type.h"
59 #include "llvm/IR/User.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
65 #include "llvm/Transforms/Utils/Cloning.h"
66 #include "llvm/Transforms/Utils/Local.h"
67 #include "llvm/Transforms/Utils/ValueMapper.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstdint>
71 #include <iterator>
72 #include <limits>
73 #include <optional>
74 #include <string>
75 #include <utility>
76 #include <vector>
77 
78 #define DEBUG_TYPE "inline-function"
79 
80 using namespace llvm;
81 using namespace llvm::memprof;
82 using ProfileCount = Function::ProfileCount;
83 
84 static cl::opt<bool>
85 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
86   cl::Hidden,
87   cl::desc("Convert noalias attributes to metadata during inlining."));
88 
89 static cl::opt<bool>
90     UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
91                         cl::init(true),
92                         cl::desc("Use the llvm.experimental.noalias.scope.decl "
93                                  "intrinsic during inlining."));
94 
95 // Disabled by default, because the added alignment assumptions may increase
96 // compile-time and block optimizations. This option is not suitable for use
97 // with frontends that emit comprehensive parameter alignment annotations.
98 static cl::opt<bool>
99 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
100   cl::init(false), cl::Hidden,
101   cl::desc("Convert align attributes to assumptions during inlining."));
102 
103 static cl::opt<unsigned> InlinerAttributeWindow(
104     "max-inst-checked-for-throw-during-inlining", cl::Hidden,
105     cl::desc("the maximum number of instructions analyzed for may throw during "
106              "attribute inference in inlined body"),
107     cl::init(4));
108 
109 namespace {
110 
111   /// A class for recording information about inlining a landing pad.
112   class LandingPadInliningInfo {
113     /// Destination of the invoke's unwind.
114     BasicBlock *OuterResumeDest;
115 
116     /// Destination for the callee's resume.
117     BasicBlock *InnerResumeDest = nullptr;
118 
119     /// LandingPadInst associated with the invoke.
120     LandingPadInst *CallerLPad = nullptr;
121 
122     /// PHI for EH values from landingpad insts.
123     PHINode *InnerEHValuesPHI = nullptr;
124 
125     SmallVector<Value*, 8> UnwindDestPHIValues;
126 
127   public:
128     LandingPadInliningInfo(InvokeInst *II)
129         : OuterResumeDest(II->getUnwindDest()) {
130       // If there are PHI nodes in the unwind destination block, we need to keep
131       // track of which values came into them from the invoke before removing
132       // the edge from this block.
133       BasicBlock *InvokeBB = II->getParent();
134       BasicBlock::iterator I = OuterResumeDest->begin();
135       for (; isa<PHINode>(I); ++I) {
136         // Save the value to use for this edge.
137         PHINode *PHI = cast<PHINode>(I);
138         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
139       }
140 
141       CallerLPad = cast<LandingPadInst>(I);
142     }
143 
144     /// The outer unwind destination is the target of
145     /// unwind edges introduced for calls within the inlined function.
146     BasicBlock *getOuterResumeDest() const {
147       return OuterResumeDest;
148     }
149 
150     BasicBlock *getInnerResumeDest();
151 
152     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
153 
154     /// Forward the 'resume' instruction to the caller's landing pad block.
155     /// When the landing pad block has only one predecessor, this is
156     /// a simple branch. When there is more than one predecessor, we need to
157     /// split the landing pad block after the landingpad instruction and jump
158     /// to there.
159     void forwardResume(ResumeInst *RI,
160                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
161 
162     /// Add incoming-PHI values to the unwind destination block for the given
163     /// basic block, using the values for the original invoke's source block.
164     void addIncomingPHIValuesFor(BasicBlock *BB) const {
165       addIncomingPHIValuesForInto(BB, OuterResumeDest);
166     }
167 
168     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
169       BasicBlock::iterator I = dest->begin();
170       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
171         PHINode *phi = cast<PHINode>(I);
172         phi->addIncoming(UnwindDestPHIValues[i], src);
173       }
174     }
175   };
176 
177 } // end anonymous namespace
178 
179 /// Get or create a target for the branch from ResumeInsts.
180 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
181   if (InnerResumeDest) return InnerResumeDest;
182 
183   // Split the landing pad.
184   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
185   InnerResumeDest =
186     OuterResumeDest->splitBasicBlock(SplitPoint,
187                                      OuterResumeDest->getName() + ".body");
188 
189   // The number of incoming edges we expect to the inner landing pad.
190   const unsigned PHICapacity = 2;
191 
192   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
193   BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
194   BasicBlock::iterator I = OuterResumeDest->begin();
195   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
196     PHINode *OuterPHI = cast<PHINode>(I);
197     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
198                                         OuterPHI->getName() + ".lpad-body");
199     InnerPHI->insertBefore(InsertPoint);
200     OuterPHI->replaceAllUsesWith(InnerPHI);
201     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
202   }
203 
204   // Create a PHI for the exception values.
205   InnerEHValuesPHI =
206       PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
207   InnerEHValuesPHI->insertBefore(InsertPoint);
208   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
209   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
210 
211   // All done.
212   return InnerResumeDest;
213 }
214 
215 /// Forward the 'resume' instruction to the caller's landing pad block.
216 /// When the landing pad block has only one predecessor, this is a simple
217 /// branch. When there is more than one predecessor, we need to split the
218 /// landing pad block after the landingpad instruction and jump to there.
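///
/// As an illustrative sketch (block and value names here are hypothetical),
/// a resume in the inlined code such as:
///
///   inlined.bb:
///     resume { ptr, i32 } %lp.val
///
/// becomes a branch into the (possibly split) caller landing pad body, with
/// %lp.val fed into the PHI that merges exception values there:
///
///   inlined.bb:
///     br label %lpad.body
///   lpad.body:
///     %eh.lpad-body = phi { ptr, i32 } [ %outer.lp, %lpad ],
///                                      [ %lp.val, %inlined.bb ]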
219 void LandingPadInliningInfo::forwardResume(
220     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
221   BasicBlock *Dest = getInnerResumeDest();
222   BasicBlock *Src = RI->getParent();
223 
224   BranchInst::Create(Dest, Src);
225 
226   // Update the PHIs in the destination. They were inserted in an order which
227   // makes this work.
228   addIncomingPHIValuesForInto(Src, Dest);
229 
230   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
231   RI->eraseFromParent();
232 }
233 
234 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
235 static Value *getParentPad(Value *EHPad) {
236   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
237     return FPI->getParentPad();
238   return cast<CatchSwitchInst>(EHPad)->getParentPad();
239 }
240 
241 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
242 
243 /// Helper for getUnwindDestToken that does the descendant-ward part of
244 /// the search.
245 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
246                                        UnwindDestMemoTy &MemoMap) {
247   SmallVector<Instruction *, 8> Worklist(1, EHPad);
248 
249   while (!Worklist.empty()) {
250     Instruction *CurrentPad = Worklist.pop_back_val();
251     // We only put pads on the worklist that aren't in the MemoMap.  When
252     // we find an unwind dest for a pad we may update its ancestors, but
253     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
254     // so they should never get updated while queued on the worklist.
255     assert(!MemoMap.count(CurrentPad));
256     Value *UnwindDestToken = nullptr;
257     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
258       if (CatchSwitch->hasUnwindDest()) {
259         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
260       } else {
261         // Catchswitch doesn't have a 'nounwind' variant, and one might be
262         // annotated as "unwinds to caller" when really it's nounwind (see
263         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
264         // parent's unwind dest from this.  We can check its catchpads'
265         // descendants, since they might include a cleanuppad with an
266         // "unwinds to caller" cleanupret, which can be trusted.
267         for (auto HI = CatchSwitch->handler_begin(),
268                   HE = CatchSwitch->handler_end();
269              HI != HE && !UnwindDestToken; ++HI) {
270           BasicBlock *HandlerBlock = *HI;
271           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
272           for (User *Child : CatchPad->users()) {
273             // Intentionally ignore invokes here -- since the catchswitch is
274             // marked "unwind to caller", it would be a verifier error if it
275             // contained an invoke which unwinds out of it, so any invoke we'd
276             // encounter must unwind to some child of the catch.
277             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
278               continue;
279 
280             Instruction *ChildPad = cast<Instruction>(Child);
281             auto Memo = MemoMap.find(ChildPad);
282             if (Memo == MemoMap.end()) {
283               // Haven't figured out this child pad yet; queue it.
284               Worklist.push_back(ChildPad);
285               continue;
286             }
287             // We've already checked this child, but might have found that
288             // it offers no proof either way.
289             Value *ChildUnwindDestToken = Memo->second;
290             if (!ChildUnwindDestToken)
291               continue;
292             // We already know the child's unwind dest, which can either
293             // be ConstantTokenNone to indicate unwind to caller, or can
294             // be another child of the catchpad.  Only the former indicates
295             // the unwind dest of the catchswitch.
296             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
297               UnwindDestToken = ChildUnwindDestToken;
298               break;
299             }
300             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
301           }
302         }
303       }
304     } else {
305       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
306       for (User *U : CleanupPad->users()) {
307         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
308           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
309             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
310           else
311             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
312           break;
313         }
314         Value *ChildUnwindDestToken;
315         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
316           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
317         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
318           Instruction *ChildPad = cast<Instruction>(U);
319           auto Memo = MemoMap.find(ChildPad);
320           if (Memo == MemoMap.end()) {
321             // Haven't resolved this child yet; queue it and keep searching.
322             Worklist.push_back(ChildPad);
323             continue;
324           }
325           // We've checked this child, but still need to ignore it if it
326           // had no proof either way.
327           ChildUnwindDestToken = Memo->second;
328           if (!ChildUnwindDestToken)
329             continue;
330         } else {
331           // Not a relevant user of the cleanuppad
332           continue;
333         }
334         // In a well-formed program, the child/invoke must either unwind to
335         // an(other) child of the cleanup, or exit the cleanup.  In the
336         // first case, continue searching.
337         if (isa<Instruction>(ChildUnwindDestToken) &&
338             getParentPad(ChildUnwindDestToken) == CleanupPad)
339           continue;
340         UnwindDestToken = ChildUnwindDestToken;
341         break;
342       }
343     }
344     // If we haven't found an unwind dest for CurrentPad, we may have queued its
345     // children, so move on to the next in the worklist.
346     if (!UnwindDestToken)
347       continue;
348 
349     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
350     // any ancestors of CurrentPad up to but not including UnwindDestToken's
351     // parent pad.  Record this in the memo map, and check to see if the
352     // original EHPad being queried is one of the ones exited.
353     Value *UnwindParent;
354     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
355       UnwindParent = getParentPad(UnwindPad);
356     else
357       UnwindParent = nullptr;
358     bool ExitedOriginalPad = false;
359     for (Instruction *ExitedPad = CurrentPad;
360          ExitedPad && ExitedPad != UnwindParent;
361          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
362       // Skip over catchpads since they just follow their catchswitches.
363       if (isa<CatchPadInst>(ExitedPad))
364         continue;
365       MemoMap[ExitedPad] = UnwindDestToken;
366       ExitedOriginalPad |= (ExitedPad == EHPad);
367     }
368 
369     if (ExitedOriginalPad)
370       return UnwindDestToken;
371 
372     // Continue the search.
373   }
374 
375   // No definitive information is contained within this funclet.
376   return nullptr;
377 }
378 
379 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
380 /// return that pad instruction.  If it unwinds to caller, return
381 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
382 /// return nullptr.
383 ///
384 /// This routine gets invoked for calls in funclets in inlinees when inlining
385 /// an invoke.  Since many funclets don't have calls inside them, it's queried
386 /// on-demand rather than building a map of pads to unwind dests up front.
387 /// Determining a funclet's unwind dest may require recursively searching its
388 /// descendants, and also ancestors and cousins if the descendants don't provide
389 /// an answer.  Since most funclets will have their unwind dest immediately
390 /// available as the unwind dest of a catchswitch or cleanupret, this routine
391 /// searches top-down from the given pad and then up. To avoid worst-case
392 /// quadratic run-time given that approach, it uses a memo map to avoid
393 /// re-processing funclet trees.  The callers that rewrite the IR as they go
394 /// take advantage of this, for correctness, by checking/forcing rewritten
395 /// pads' entries to match the original callee view.
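///
/// For example (an illustrative sketch; names are made up): for a cleanuppad
/// whose only cleanupret unwinds to caller,
///
///   %cp = cleanuppad within none []
///   cleanupret from %cp unwind to caller
///
/// a query on %cp returns ConstantTokenNone, while a cleanupret that unwinds
/// to a block beginning with a catchswitch makes the query return that
/// catchswitch instruction.  If neither the pad nor its descendants settle
/// the question, the search continues through its ancestors, memoizing
/// nullptr for pads that provided no information.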
396 static Value *getUnwindDestToken(Instruction *EHPad,
397                                  UnwindDestMemoTy &MemoMap) {
398   // Catchpads unwind to the same place as their catchswitch;
399   // redirect any queries on catchpads so the code below can
400   // deal with just catchswitches and cleanuppads.
401   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
402     EHPad = CPI->getCatchSwitch();
403 
404   // Check if we've already determined the unwind dest for this pad.
405   auto Memo = MemoMap.find(EHPad);
406   if (Memo != MemoMap.end())
407     return Memo->second;
408 
409   // Search EHPad and, if necessary, its descendants.
410   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
411   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
412   if (UnwindDestToken)
413     return UnwindDestToken;
414 
415   // No information is available for this EHPad from itself or any of its
416   // descendants.  An unwind all the way out to a pad in the caller would
417   // need also to agree with the unwind dest of the parent funclet, so
418   // search up the chain to try to find a funclet with information.  Put
419   // null entries in the memo map to avoid re-processing as we go up.
420   MemoMap[EHPad] = nullptr;
421 #ifndef NDEBUG
422   SmallPtrSet<Instruction *, 4> TempMemos;
423   TempMemos.insert(EHPad);
424 #endif
425   Instruction *LastUselessPad = EHPad;
426   Value *AncestorToken;
427   for (AncestorToken = getParentPad(EHPad);
428        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
429        AncestorToken = getParentPad(AncestorToken)) {
430     // Skip over catchpads since they just follow their catchswitches.
431     if (isa<CatchPadInst>(AncestorPad))
432       continue;
433     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
434     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
435     // call to getUnwindDestToken, that would mean that AncestorPad had no
436     // information in itself, its descendants, or its ancestors.  If that
437     // were the case, then we should also have recorded the lack of information
438     // for the descendant that we're coming from.  So assert that we don't
439     // find a null entry in the MemoMap for AncestorPad.
440     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
441     auto AncestorMemo = MemoMap.find(AncestorPad);
442     if (AncestorMemo == MemoMap.end()) {
443       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
444     } else {
445       UnwindDestToken = AncestorMemo->second;
446     }
447     if (UnwindDestToken)
448       break;
449     LastUselessPad = AncestorPad;
450     MemoMap[LastUselessPad] = nullptr;
451 #ifndef NDEBUG
452     TempMemos.insert(LastUselessPad);
453 #endif
454   }
455 
456   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
457   // returned nullptr (and likewise for EHPad and any of its ancestors up to
458   // LastUselessPad), so LastUselessPad has no information from below.  Since
459   // getUnwindDestTokenHelper must investigate all downward paths through
460   // no-information nodes to prove that a node has no information like this,
461   // and since any time it finds information it records it in the MemoMap for
462   // not just the immediately-containing funclet but also any ancestors also
463   // exited, it must be the case that, walking downward from LastUselessPad,
464   // visiting just those nodes which have not been mapped to an unwind dest
465   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
466   // they are just used to keep getUnwindDestTokenHelper from repeating work),
467   // any node visited must have been exhaustively searched with no information
468   // for it found.
469   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
470   while (!Worklist.empty()) {
471     Instruction *UselessPad = Worklist.pop_back_val();
472     auto Memo = MemoMap.find(UselessPad);
473     if (Memo != MemoMap.end() && Memo->second) {
474       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
475       // that it is a funclet that does have information about unwinding to
476       // a particular destination; its parent was a useless pad.
477       // Since its parent has no information, the unwind edge must not escape
478       // the parent, and must target a sibling of this pad.  This local unwind
479       // gives us no information about EHPad.  Leave it and the subtree rooted
480       // at it alone.
481       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
482       continue;
483     }
484     // We know we don't have information for UselessPad.  If it has an entry in
485     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
486     // added on this invocation of getUnwindDestToken; if a previous invocation
487     // recorded nullptr, it would have had to prove that the ancestors of
488     // UselessPad, which include LastUselessPad, had no information, and that
489     // in turn would have required proving that the descendants of
490     // LastUselessPad, which include EHPad, have no information about
491     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
492     // the MemoMap on that invocation, which isn't the case if we got here.
493     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
494     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
495     // information that we'd be contradicting by making a map entry for it
496     // (which is something that getUnwindDestTokenHelper must have proved for
497     // us to get here).  Just assert on its direct users here; the checks in
498     // this downward walk at its descendants will verify that they don't have
499     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
500     // unwind edges or unwind to a sibling).
501     MemoMap[UselessPad] = UnwindDestToken;
502     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
503       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
504       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
505         auto *CatchPad = HandlerBlock->getFirstNonPHI();
506         for (User *U : CatchPad->users()) {
507           assert(
508               (!isa<InvokeInst>(U) ||
509                (getParentPad(
510                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
511                 CatchPad)) &&
512               "Expected useless pad");
513           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
514             Worklist.push_back(cast<Instruction>(U));
515         }
516       }
517     } else {
518       assert(isa<CleanupPadInst>(UselessPad));
519       for (User *U : UselessPad->users()) {
520         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
521         assert((!isa<InvokeInst>(U) ||
522                 (getParentPad(
523                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
524                  UselessPad)) &&
525                "Expected useless pad");
526         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
527           Worklist.push_back(cast<Instruction>(U));
528       }
529     }
530   }
531 
532   return UnwindDestToken;
533 }
534 
535 /// When we inline a basic block into an invoke,
536 /// we have to turn all of the calls that can throw into invokes.
537 /// This function analyzes BB to see if there are any calls, and if so,
538 /// it rewrites them to be invokes that unwind to UnwindEdge, and returns the
539 /// rewritten block so the caller can update the unwind destination's PHI nodes.
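///
/// As an illustrative sketch (hypothetical names), a potentially-throwing call
/// in the inlined body such as:
///
///   %r = call i32 @helper(i32 %x)
///
/// is split out of its block and rewritten as:
///
///   %r = invoke i32 @helper(i32 %x)
///           to label %split.cont unwind label %unwind.edge
///
/// where %unwind.edge is the caller's unwind destination passed in as
/// UnwindEdge.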
540 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
541     BasicBlock *BB, BasicBlock *UnwindEdge,
542     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
543   for (Instruction &I : llvm::make_early_inc_range(*BB)) {
544     // We only need to check for function calls: inlined invoke
545     // instructions require no special handling.
546     CallInst *CI = dyn_cast<CallInst>(&I);
547 
548     if (!CI || CI->doesNotThrow())
549       continue;
550 
551     // We do not need to (and in fact, cannot) convert possibly throwing calls
552     // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
553     // invokes.  The caller's "segment" of the deoptimization continuation
554     // attached to the newly inlined @llvm.experimental.deoptimize
555     // (resp. @llvm.experimental.guard) call should contain the exception
556     // handling logic, if any.
557     if (auto *F = CI->getCalledFunction())
558       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
559           F->getIntrinsicID() == Intrinsic::experimental_guard)
560         continue;
561 
562     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
563       // This call is nested inside a funclet.  If that funclet has an unwind
564       // destination within the inlinee, then unwinding out of this call would
565       // be UB.  Rewriting this call to an invoke which targets the inlined
566       // invoke's unwind dest would give the call's parent funclet multiple
567       // unwind destinations, which is something that subsequent EH table
568       // generation can't handle and that the verifier rejects.  So when we
569       // see such a call, leave it as a call.
570       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
571       Value *UnwindDestToken =
572           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
573       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
574         continue;
575 #ifndef NDEBUG
576       Instruction *MemoKey;
577       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
578         MemoKey = CatchPad->getCatchSwitch();
579       else
580         MemoKey = FuncletPad;
581       assert(FuncletUnwindMap->count(MemoKey) &&
582              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
583              "must get memoized to avoid confusing later searches");
584 #endif // NDEBUG
585     }
586 
587     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
588     return BB;
589   }
590   return nullptr;
591 }
592 
593 /// If we inlined an invoke site, we need to convert calls
594 /// in the body of the inlined function into invokes.
595 ///
596 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
597 /// block of the inlined code (the last block is the end of the function),
598 /// and InlineCodeInfo is information about the code that got inlined.
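///
/// For instance (a sketch with made-up clause values), if the caller's landing
/// pad is
///
///   %outer = landingpad { ptr, i32 } catch ptr @TypeA
///
/// then every landingpad inlined from the callee gets "catch ptr @TypeA" (and
/// the cleanup flag, if the outer pad has one) appended to its clause list, so
/// exceptions that would have reached the caller's handler still do after
/// inlining.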
599 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
600                                     ClonedCodeInfo &InlinedCodeInfo) {
601   BasicBlock *InvokeDest = II->getUnwindDest();
602 
603   Function *Caller = FirstNewBlock->getParent();
604 
605   // The inlined code is currently at the end of the function; scan from the
606   // start of the inlined code to its end, checking for stuff we need to
607   // rewrite.
608   LandingPadInliningInfo Invoke(II);
609 
610   // Get all of the inlined landing pad instructions.
611   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
612   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
613        I != E; ++I)
614     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
615       InlinedLPads.insert(II->getLandingPadInst());
616 
617   // Append the clauses from the outer landing pad instruction into the inlined
618   // landing pad instructions.
619   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
620   for (LandingPadInst *InlinedLPad : InlinedLPads) {
621     unsigned OuterNum = OuterLPad->getNumClauses();
622     InlinedLPad->reserveClauses(OuterNum);
623     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
624       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
625     if (OuterLPad->isCleanup())
626       InlinedLPad->setCleanup(true);
627   }
628 
629   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
630        BB != E; ++BB) {
631     if (InlinedCodeInfo.ContainsCalls)
632       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
633               &*BB, Invoke.getOuterResumeDest()))
634         // Update any PHI nodes in the exceptional block to indicate that there
635         // is now a new entry in them.
636         Invoke.addIncomingPHIValuesFor(NewBB);
637 
638     // Forward any resumes that are remaining here.
639     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
640       Invoke.forwardResume(RI, InlinedLPads);
641   }
642 
643   // Now that everything is happy, we have one final detail.  The PHI nodes in
644   // the exception destination block still have entries due to the original
645   // invoke instruction. Eliminate these entries (which might even delete the
646   // PHI node) now.
647   InvokeDest->removePredecessor(II->getParent());
648 }
649 
650 /// If we inlined an invoke site, we need to convert calls
651 /// in the body of the inlined function into invokes.
652 ///
653 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
654 /// block of the inlined code (the last block is the end of the function),
655 /// and InlineCodeInfo is information about the code that got inlined.
656 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
657                                ClonedCodeInfo &InlinedCodeInfo) {
658   BasicBlock *UnwindDest = II->getUnwindDest();
659   Function *Caller = FirstNewBlock->getParent();
660 
661   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
662 
663   // If there are PHI nodes in the unwind destination block, we need to keep
664   // track of which values came into them from the invoke before removing the
665   // edge from this block.
666   SmallVector<Value *, 8> UnwindDestPHIValues;
667   BasicBlock *InvokeBB = II->getParent();
668   for (PHINode &PHI : UnwindDest->phis()) {
669     // Save the value to use for this edge.
670     UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
671   }
672 
673   // Add incoming-PHI values to the unwind destination block for the given basic
674   // block, using the values for the original invoke's source block.
675   auto UpdatePHINodes = [&](BasicBlock *Src) {
676     BasicBlock::iterator I = UnwindDest->begin();
677     for (Value *V : UnwindDestPHIValues) {
678       PHINode *PHI = cast<PHINode>(I);
679       PHI->addIncoming(V, Src);
680       ++I;
681     }
682   };
683 
684   // This connects all the instructions which 'unwind to caller' to the invoke
685   // destination.
686   UnwindDestMemoTy FuncletUnwindMap;
687   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
688        BB != E; ++BB) {
689     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
690       if (CRI->unwindsToCaller()) {
691         auto *CleanupPad = CRI->getCleanupPad();
692         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
693         CRI->eraseFromParent();
694         UpdatePHINodes(&*BB);
695         // Finding a cleanupret with an unwind destination would confuse
696         // subsequent calls to getUnwindDestToken, so map the cleanuppad
697         // to short-circuit any such calls and recognize this as an "unwind
698         // to caller" cleanup.
699         assert(!FuncletUnwindMap.count(CleanupPad) ||
700                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
701         FuncletUnwindMap[CleanupPad] =
702             ConstantTokenNone::get(Caller->getContext());
703       }
704     }
705 
706     Instruction *I = BB->getFirstNonPHI();
707     if (!I->isEHPad())
708       continue;
709 
710     Instruction *Replacement = nullptr;
711     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
712       if (CatchSwitch->unwindsToCaller()) {
713         Value *UnwindDestToken;
714         if (auto *ParentPad =
715                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
716           // This catchswitch is nested inside another funclet.  If that
717           // funclet has an unwind destination within the inlinee, then
718           // unwinding out of this catchswitch would be UB.  Rewriting this
719           // catchswitch to unwind to the inlined invoke's unwind dest would
720           // give the parent funclet multiple unwind destinations, which is
721           // something that subsequent EH table generation can't handle and
722           // that the verifier rejects.  So when we see such a call, leave it
723           // as "unwind to caller".
724           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
725           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
726             continue;
727         } else {
728           // This catchswitch has no parent to inherit constraints from, and
729           // none of its descendants can have an unwind edge that exits it and
730           // targets another funclet in the inlinee.  It may or may not have a
731           // descendant that definitively has an unwind to caller.  In either
732           // case, we'll have to assume that any unwinds out of it may need to
733           // be routed to the caller, so treat it as though it has a definitive
734           // unwind to caller.
735           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
736         }
737         auto *NewCatchSwitch = CatchSwitchInst::Create(
738             CatchSwitch->getParentPad(), UnwindDest,
739             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
740             CatchSwitch);
741         for (BasicBlock *PadBB : CatchSwitch->handlers())
742           NewCatchSwitch->addHandler(PadBB);
743         // Propagate info for the old catchswitch over to the new one in
744         // the unwind map.  This also serves to short-circuit any subsequent
745         // checks for the unwind dest of this catchswitch, which would get
746         // confused if they found the outer handler in the callee.
747         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
748         Replacement = NewCatchSwitch;
749       }
750     } else if (!isa<FuncletPadInst>(I)) {
751       llvm_unreachable("unexpected EHPad!");
752     }
753 
754     if (Replacement) {
755       Replacement->takeName(I);
756       I->replaceAllUsesWith(Replacement);
757       I->eraseFromParent();
758       UpdatePHINodes(&*BB);
759     }
760   }
761 
762   if (InlinedCodeInfo.ContainsCalls)
763     for (Function::iterator BB = FirstNewBlock->getIterator(),
764                             E = Caller->end();
765          BB != E; ++BB)
766       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
767               &*BB, UnwindDest, &FuncletUnwindMap))
768         // Update any PHI nodes in the exceptional block to indicate that there
769         // is now a new entry in them.
770         UpdatePHINodes(NewBB);
771 
772   // Now that everything is happy, we have one final detail.  The PHI nodes in
773   // the exception destination block still have entries due to the original
774   // invoke instruction. Eliminate these entries (which might even delete the
775   // PHI node) now.
776   UnwindDest->removePredecessor(InvokeBB);
777 }
778 
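// Return true when the two memprof call stack contexts agree on their common
// prefix of stack ids.  For example (illustrative values), a MIB stack of
// [1, 2, 3] and a callsite stack of [1, 2, 3, 4] share a common prefix, while
// [1, 2, 3] and [1, 5] do not.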
779 static bool haveCommonPrefix(MDNode *MIBStackContext,
780                              MDNode *CallsiteStackContext) {
781   assert(MIBStackContext->getNumOperands() > 0 &&
782          CallsiteStackContext->getNumOperands() > 0);
783   // Because of the context trimming performed during matching, the callsite
784   // context could have more stack ids than the MIB. We match up to the end of
785   // the shortest stack context.
786   for (auto MIBStackIter = MIBStackContext->op_begin(),
787             CallsiteStackIter = CallsiteStackContext->op_begin();
788        MIBStackIter != MIBStackContext->op_end() &&
789        CallsiteStackIter != CallsiteStackContext->op_end();
790        MIBStackIter++, CallsiteStackIter++) {
791     auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
792     auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
793     assert(Val1 && Val2);
794     if (Val1->getZExtValue() != Val2->getZExtValue())
795       return false;
796   }
797   return true;
798 }
799 
800 static void removeMemProfMetadata(CallBase *Call) {
801   Call->setMetadata(LLVMContext::MD_memprof, nullptr);
802 }
803 
804 static void removeCallsiteMetadata(CallBase *Call) {
805   Call->setMetadata(LLVMContext::MD_callsite, nullptr);
806 }
807 
808 static void updateMemprofMetadata(CallBase *CI,
809                                   const std::vector<Metadata *> &MIBList) {
810   assert(!MIBList.empty());
811   // Remove the existing memprof metadata; it will either be replaced or may not
812   // be needed if we are able to use a single allocation type function attribute.
813   removeMemProfMetadata(CI);
814   CallStackTrie CallStack;
815   for (Metadata *MIB : MIBList)
816     CallStack.addCallStack(cast<MDNode>(MIB));
817   bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
818   assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
819   if (!MemprofMDAttached)
820     // If we used a function attribute remove the callsite metadata as well.
821     removeCallsiteMetadata(CI);
822 }
823 
824 // Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
825 // inlined callee body, based on the callsite metadata InlinedCallsiteMD from
826 // the call that was inlined.
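//
// As a sketch (hypothetical stack ids): if the call in the callee body carried
// callsite context [10] and the call site being inlined carried context [20],
// the cloned call's context becomes [10, 20].  Each memprof MIB whose stack
// context shares a prefix with [10, 20] stays on the clone; contexts that do
// not match remain associated with the out-of-line original call.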
827 static void propagateMemProfHelper(const CallBase *OrigCall,
828                                    CallBase *ClonedCall,
829                                    MDNode *InlinedCallsiteMD) {
830   MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
831   MDNode *ClonedCallsiteMD = nullptr;
832   // Check if the call originally had callsite metadata, and update it for the
833   // new call in the inlined body.
834   if (OrigCallsiteMD) {
835     // The cloned call's context is now the concatenation of the original call's
836     // callsite metadata and the callsite metadata on the call where it was
837     // inlined.
838     ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
839     ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
840   }
841 
842   // Update any memprof metadata on the cloned call.
843   MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
844   if (!OrigMemProfMD)
845     return;
846   // We currently expect that allocations with memprof metadata also have
847   // callsite metadata for the allocation's part of the context.
848   assert(OrigCallsiteMD);
849 
850   // New call's MIB list.
851   std::vector<Metadata *> NewMIBList;
852 
853   // For each MIB metadata, check if its call stack context starts with the
854   // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
855   // the inlined body. If not, it stays on the out-of-line original call.
856   for (auto &MIBOp : OrigMemProfMD->operands()) {
857     MDNode *MIB = dyn_cast<MDNode>(MIBOp);
858     // Stack is first operand of MIB.
859     MDNode *StackMD = getMIBStackNode(MIB);
860     assert(StackMD);
861     // See if the new cloned callsite context matches this profiled context.
862     if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
863       // Add it to the cloned call's MIB list.
864       NewMIBList.push_back(MIB);
865   }
866   if (NewMIBList.empty()) {
867     removeMemProfMetadata(ClonedCall);
868     removeCallsiteMetadata(ClonedCall);
869     return;
870   }
871   if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
872     updateMemprofMetadata(ClonedCall, NewMIBList);
873 }
874 
875 // Update memprof related metadata (!memprof and !callsite) based on the
876 // inlining of Callee into the callsite at CB. The updates include merging the
877 // inlined callee's callsite metadata with that of the inlined call,
878 // and moving the subset of any memprof contexts to the inlined callee
879 // allocations if they match the new inlined call stack.
880 static void
881 propagateMemProfMetadata(Function *Callee, CallBase &CB,
882                          bool ContainsMemProfMetadata,
883                          const ValueMap<const Value *, WeakTrackingVH> &VMap) {
884   MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
885   // Only need to update if the inlined callsite had callsite metadata, or if
886   // there was any memprof metadata inlined.
887   if (!CallsiteMD && !ContainsMemProfMetadata)
888     return;
889 
890   // Propagate metadata onto the cloned calls in the inlined callee.
891   for (const auto &Entry : VMap) {
892     // See if this is a call that has been inlined and remapped, and not
893     // simplified away in the process.
894     auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
895     auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
896     if (!OrigCall || !ClonedCall)
897       continue;
898     // If the inlined callsite did not have any callsite metadata, then it isn't
899     // involved in any profiled call contexts, and we can remove any memprof
900     // metadata on the cloned call.
901     if (!CallsiteMD) {
902       removeMemProfMetadata(ClonedCall);
903       removeCallsiteMetadata(ClonedCall);
904       continue;
905     }
906     propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
907   }
908 }
909 
910 /// When inlining a call site that has !llvm.mem.parallel_loop_access,
911 /// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
912 /// be propagated to all memory-accessing cloned instructions.
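///
/// A minimal sketch (hypothetical metadata ids): if the call site being
/// inlined carries !noalias !1, every cloned instruction that may read or
/// write memory gets !1 concatenated onto whatever !noalias metadata it
/// already had, and likewise for !alias.scope, !llvm.access.group and
/// !llvm.mem.parallel_loop_access.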
913 static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
914                                       Function::iterator FEnd) {
915   MDNode *MemParallelLoopAccess =
916       CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
917   MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
918   MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
919   MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
920   if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
921     return;
922 
923   for (BasicBlock &BB : make_range(FStart, FEnd)) {
924     for (Instruction &I : BB) {
925       // This metadata is only relevant for instructions that access memory.
926       if (!I.mayReadOrWriteMemory())
927         continue;
928 
929       if (MemParallelLoopAccess) {
930         // TODO: This probably should not overwrite MemParallelLoopAccess.
931         MemParallelLoopAccess = MDNode::concatenate(
932             I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
933             MemParallelLoopAccess);
934         I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
935                       MemParallelLoopAccess);
936       }
937 
938       if (AccessGroup)
939         I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
940             I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
941 
942       if (AliasScope)
943         I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
944             I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
945 
946       if (NoAlias)
947         I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
948             I.getMetadata(LLVMContext::MD_noalias), NoAlias));
949     }
950   }
951 }
952 
953 /// The call site's "funclet" bundle must be propagated to the inlined call sites.
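///
/// For example (a sketch with hypothetical names), when inlining through a
/// call site that sits inside a funclet, a cloned call such as
///
///   call void @g()
///
/// becomes
///
///   call void @g() [ "funclet"(token %callsite.pad) ]
///
/// so that later EH table generation still sees a single enclosing funclet.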
954 static void PropagateOperandBundles(Function::iterator InlinedBB,
955                                     Instruction *CallSiteEHPad) {
956   for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
957     CallBase *I = dyn_cast<CallBase>(&II);
958     if (!I)
959       continue;
960     // Skip call sites which already have a "funclet" bundle.
961     if (I->getOperandBundle(LLVMContext::OB_funclet))
962       continue;
963     // Skip call sites which are nounwind intrinsics (as long as they don't
964     // lower into regular function calls in the course of IR transformations).
965     auto *CalledFn =
966         dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
967     if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
968         !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
969       continue;
970 
971     SmallVector<OperandBundleDef, 1> OpBundles;
972     I->getOperandBundlesAsDefs(OpBundles);
973     OpBundles.emplace_back("funclet", CallSiteEHPad);
974 
975     Instruction *NewInst = CallBase::Create(I, OpBundles, I);
976     NewInst->takeName(I);
977     I->replaceAllUsesWith(NewInst);
978     I->eraseFromParent();
979   }
980 }
981 
982 namespace {
983 /// Utility for cloning !noalias and !alias.scope metadata. When a code region
984 /// using scoped alias metadata is inlined, the aliasing relationships may not
985 /// hold between the two versions. It is necessary to create a deep clone of the
986 /// metadata, putting the two versions in separate scope domains.
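///
/// Roughly (an illustrative sketch with made-up metadata ids): if the callee's
/// instructions referenced a scope !1 = !{!1, !0, "callee: %a"} in domain !0,
/// cloning produces a parallel node !11 = !{!11, !10, "callee: %a"} in a fresh
/// domain !10, and remap() rewrites the inlined instructions' !alias.scope and
/// !noalias lists to use the cloned nodes.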
987 class ScopedAliasMetadataDeepCloner {
988   using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
989   SetVector<const MDNode *> MD;
990   MetadataMap MDMap;
991   void addRecursiveMetadataUses();
992 
993 public:
994   ScopedAliasMetadataDeepCloner(const Function *F);
995 
996   /// Create a new clone of the scoped alias metadata, which will be used by
997   /// subsequent remap() calls.
998   void clone();
999 
1000   /// Remap instructions in the given range from the original to the cloned
1001   /// metadata.
1002   void remap(Function::iterator FStart, Function::iterator FEnd);
1003 };
1004 } // namespace
1005 
1006 ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1007     const Function *F) {
1008   for (const BasicBlock &BB : *F) {
1009     for (const Instruction &I : BB) {
1010       if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1011         MD.insert(M);
1012       if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1013         MD.insert(M);
1014 
1015       // We also need to clone the metadata in noalias intrinsics.
1016       if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1017         MD.insert(Decl->getScopeList());
1018     }
1019   }
1020   addRecursiveMetadataUses();
1021 }
1022 
1023 void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1024   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1025   while (!Queue.empty()) {
1026     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1027     for (const Metadata *Op : M->operands())
1028       if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
1029         if (MD.insert(OpMD))
1030           Queue.push_back(OpMD);
1031   }
1032 }
1033 
1034 void ScopedAliasMetadataDeepCloner::clone() {
1035   assert(MDMap.empty() && "clone() already called ?");
1036 
1037   SmallVector<TempMDTuple, 16> DummyNodes;
1038   for (const MDNode *I : MD) {
1039     DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
1040     MDMap[I].reset(DummyNodes.back().get());
1041   }
1042 
1043   // Create new metadata nodes to replace the dummy nodes, replacing old
1044   // metadata references with either a dummy node or an already-created new
1045   // node.
1046   SmallVector<Metadata *, 4> NewOps;
1047   for (const MDNode *I : MD) {
1048     for (const Metadata *Op : I->operands()) {
1049       if (const MDNode *M = dyn_cast<MDNode>(Op))
1050         NewOps.push_back(MDMap[M]);
1051       else
1052         NewOps.push_back(const_cast<Metadata *>(Op));
1053     }
1054 
1055     MDNode *NewM = MDNode::get(I->getContext(), NewOps);
1056     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
1057     assert(TempM->isTemporary() && "Expected temporary node");
1058 
1059     TempM->replaceAllUsesWith(NewM);
1060     NewOps.clear();
1061   }
1062 }
1063 
1064 void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
1065                                           Function::iterator FEnd) {
1066   if (MDMap.empty())
1067     return; // Nothing to do.
1068 
1069   for (BasicBlock &BB : make_range(FStart, FEnd)) {
1070     for (Instruction &I : BB) {
1071       // TODO: The null checks for the MDMap.lookup() results should no longer
1072       // be necessary.
1073       if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1074         if (MDNode *MNew = MDMap.lookup(M))
1075           I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1076 
1077       if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1078         if (MDNode *MNew = MDMap.lookup(M))
1079           I.setMetadata(LLVMContext::MD_noalias, MNew);
1080 
1081       if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1082         if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1083           Decl->setScopeList(MNew);
1084     }
1085   }
1086 }
1087 
1088 /// If the inlined function has noalias arguments,
1089 /// then add new alias scopes for each noalias argument, tag the mapped noalias
1090 /// parameters with noalias metadata specifying the new scope, and tag all
1091 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
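///
/// A small sketch (hypothetical IR) of the effect: when inlining
///
///   define void @callee(ptr noalias %p, ptr noalias %q) { ... }
///
/// a new scope is created for each noalias argument (say !p and !q in a fresh
/// domain).  An inlined store through a pointer based on %p is tagged
/// !alias.scope !{!p} and !noalias !{!q}, so alias analysis can still tell the
/// two argument-based pointer families apart after the arguments themselves
/// have been replaced by caller values.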
1092 static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
1093                                   const DataLayout &DL, AAResults *CalleeAAR,
1094                                   ClonedCodeInfo &InlinedFunctionInfo) {
1095   if (!EnableNoAliasConversion)
1096     return;
1097 
1098   const Function *CalledFunc = CB.getCalledFunction();
1099   SmallVector<const Argument *, 4> NoAliasArgs;
1100 
1101   for (const Argument &Arg : CalledFunc->args())
1102     if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1103       NoAliasArgs.push_back(&Arg);
1104 
1105   if (NoAliasArgs.empty())
1106     return;
1107 
1108   // To do a good job, if a noalias variable is captured, we need to know if
1109   // the capture point dominates the particular use we're considering.
1110   DominatorTree DT;
1111   DT.recalculate(const_cast<Function&>(*CalledFunc));
1112 
1113   // noalias indicates that pointer values based on the argument do not alias
1114   // pointer values which are not based on it. So we add a new "scope" for each
1115   // noalias function argument. Accesses using pointers based on that argument
1116   // become part of that alias scope, accesses using pointers not based on that
1117   // argument are tagged as noalias with that scope.
1118 
1119   DenseMap<const Argument *, MDNode *> NewScopes;
1120   MDBuilder MDB(CalledFunc->getContext());
1121 
1122   // Create a new scope domain for this function.
1123   MDNode *NewDomain =
1124     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
1125   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
1126     const Argument *A = NoAliasArgs[i];
1127 
1128     std::string Name = std::string(CalledFunc->getName());
1129     if (A->hasName()) {
1130       Name += ": %";
1131       Name += A->getName();
1132     } else {
1133       Name += ": argument ";
1134       Name += utostr(i);
1135     }
1136 
1137     // Note: We always create a new anonymous root here. This is true regardless
1138     // of the linkage of the callee because the aliasing "scope" is not just a
1139     // property of the callee, but also all control dependencies in the caller.
1140     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
1141     NewScopes.insert(std::make_pair(A, NewScope));
1142 
1143     if (UseNoAliasIntrinsic) {
1144       // Introduce a llvm.experimental.noalias.scope.decl for the noalias
1145       // argument.
1146       MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
1147       auto *NoAliasDecl =
1148           IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
1149       // Ignore the result for now. The result will be used when the
1150       // llvm.noalias intrinsic is introduced.
1151       (void)NoAliasDecl;
1152     }
1153   }
1154 
1155   // Iterate over all new instructions in the map; for all memory-access
1156   // instructions, add the alias scope metadata.
1157   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
1158        VMI != VMIE; ++VMI) {
1159     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
1160       if (!VMI->second)
1161         continue;
1162 
1163       Instruction *NI = dyn_cast<Instruction>(VMI->second);
1164       if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
1165         continue;
1166 
1167       bool IsArgMemOnlyCall = false, IsFuncCall = false;
1168       SmallVector<const Value *, 2> PtrArgs;
1169 
1170       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1171         PtrArgs.push_back(LI->getPointerOperand());
1172       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1173         PtrArgs.push_back(SI->getPointerOperand());
1174       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1175         PtrArgs.push_back(VAAI->getPointerOperand());
1176       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1177         PtrArgs.push_back(CXI->getPointerOperand());
1178       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1179         PtrArgs.push_back(RMWI->getPointerOperand());
1180       else if (const auto *Call = dyn_cast<CallBase>(I)) {
1181         // If we know that the call does not access memory, then we'll still
1182         // know that about the inlined clone of this call site, and we don't
1183         // need to add metadata.
1184         if (Call->doesNotAccessMemory())
1185           continue;
1186 
1187         IsFuncCall = true;
1188         if (CalleeAAR) {
1189           MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);
1190 
1191           // We'll retain this knowledge without additional metadata.
1192           if (ME.onlyAccessesInaccessibleMem())
1193             continue;
1194 
1195           if (ME.onlyAccessesArgPointees())
1196             IsArgMemOnlyCall = true;
1197         }
1198 
1199         for (Value *Arg : Call->args()) {
1200           // Only care about pointer arguments. If a noalias argument is
1201           // accessed through a non-pointer argument, it must be captured
1202           // first (e.g. via ptrtoint), and we protect against captures below.
1203           if (!Arg->getType()->isPointerTy())
1204             continue;
1205 
1206           PtrArgs.push_back(Arg);
1207         }
1208       }
1209 
1210       // If we found no pointers, then this instruction is not suitable for
1211       // pairing with an instruction to receive aliasing metadata.
1212       // However, if this is a call, we might simply not alias with any of the
1213       // noalias arguments.
1214       if (PtrArgs.empty() && !IsFuncCall)
1215         continue;
1216 
1217       // It is possible that there is only one underlying object, but you
1218       // need to go through several PHIs to see it, and thus it could be
1219       // repeated in the Objects list.
1220       SmallPtrSet<const Value *, 4> ObjSet;
1221       SmallVector<Metadata *, 4> Scopes, NoAliases;
1222 
1223       SmallSetVector<const Argument *, 4> NAPtrArgs;
1224       for (const Value *V : PtrArgs) {
1225         SmallVector<const Value *, 4> Objects;
1226         getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1227 
1228         for (const Value *O : Objects)
1229           ObjSet.insert(O);
1230       }
1231 
1232       // Figure out if we're derived from anything that is not a noalias
1233       // argument.
1234       bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
1235            UsesUnknownObject = false;
1236       for (const Value *V : ObjSet) {
1237       // Is this value a constant that cannot be derived from any pointer
1238       // value? (We need to exclude constant expressions, for example, that
1239       // are formed from arithmetic on global symbols.)
1240         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1241                              isa<ConstantPointerNull>(V) ||
1242                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1243         if (IsNonPtrConst)
1244           continue;
1245 
1246         // If this is anything other than a noalias argument, then we cannot
1247         // completely describe the aliasing properties using alias.scope
1248         // metadata (and, thus, won't add any).
1249         if (const Argument *A = dyn_cast<Argument>(V)) {
1250           if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1251             UsesAliasingPtr = true;
1252         } else {
1253           UsesAliasingPtr = true;
1254         }
1255 
1256         if (isEscapeSource(V)) {
1257           // An escape source can only alias with a noalias argument if it has
1258           // been captured beforehand.
1259           RequiresNoCaptureBefore = true;
1260         } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
1261           // If this is neither an escape source, nor some identified object
1262           // (which cannot directly alias a noalias argument), nor some other
1263           // argument (which, by definition, also cannot alias a noalias
1264           // argument), conservatively do not make any assumptions.
1265           UsesUnknownObject = true;
1266         }
1267       }
1268 
1269       // Nothing we can do if the used underlying object cannot be reliably
1270       // determined.
1271       if (UsesUnknownObject)
1272         continue;
1273 
1274       // A function call can always get captured noalias pointers (via other
1275       // parameters, globals, etc.).
1276       if (IsFuncCall && !IsArgMemOnlyCall)
1277         RequiresNoCaptureBefore = true;
1278 
1279       // First, we want to figure out all of the sets with which we definitely
1280       // don't alias. Iterate over all noalias sets, and add those for which:
1281       //   1. The noalias argument is not in the set of objects from which we
1282       //      definitely derive.
1283       //   2. The noalias argument has not yet been captured.
1284       // An arbitrary function that might load pointers could see captured
1285       // noalias arguments via other noalias arguments or globals, and so we
1286       // must always check for prior capture.
1287       for (const Argument *A : NoAliasArgs) {
1288         if (ObjSet.contains(A))
1289           continue; // May be based on a noalias argument.
1290 
1291         // It might be tempting to skip the PointerMayBeCapturedBefore check if
1292         // A->hasNoCaptureAttr() is true, but this is incorrect because
1293         // nocapture only guarantees that no copies outlive the function, not
1294         // that the value cannot be locally captured.
1295         if (!RequiresNoCaptureBefore ||
1296             !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
1297                                         /* StoreCaptures */ false, I, &DT))
1298           NoAliases.push_back(NewScopes[A]);
1299       }
1300 
1301       if (!NoAliases.empty())
1302         NI->setMetadata(LLVMContext::MD_noalias,
1303                         MDNode::concatenate(
1304                             NI->getMetadata(LLVMContext::MD_noalias),
1305                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1306 
1307       // Next, we want to figure out all of the sets to which we might belong.
1308       // We might belong to a set if the noalias argument is in the set of
1309       // underlying objects. If there is some non-noalias argument in our list
1310       // of underlying objects, then we cannot add a scope because the fact
1311       // that some access does not alias with any set of our noalias arguments
1312       // cannot itself guarantee that it does not alias with this access
1313       // (because there is some pointer of unknown origin involved and the
1314       // other access might also depend on this pointer). We also cannot add
1315       // scopes to arbitrary functions unless we know they don't access any
1316       // non-parameter pointer-values.
1317       bool CanAddScopes = !UsesAliasingPtr;
1318       if (CanAddScopes && IsFuncCall)
1319         CanAddScopes = IsArgMemOnlyCall;
1320 
1321       if (CanAddScopes)
1322         for (const Argument *A : NoAliasArgs) {
1323           if (ObjSet.count(A))
1324             Scopes.push_back(NewScopes[A]);
1325         }
1326 
1327       if (!Scopes.empty())
1328         NI->setMetadata(
1329             LLVMContext::MD_alias_scope,
1330             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1331                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1332     }
1333   }
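       // Net effect, sketched by hand rather than copied from a test: a cloned
       // access derived only from noalias argument %a may end up looking like
       //   %v = load i32, ptr %p, !alias.scope !5, !noalias !6
       // where !5 lists %a's new scope and !6 lists the scopes of the noalias
       // arguments this access provably cannot alias.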
1334 }
1335 
1336 static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
1337                                                    ReturnInst *End) {
1338 
1339   assert(Begin->getParent() == End->getParent() &&
1340          "Expected to be in same basic block!");
1341   auto BeginIt = Begin->getIterator();
1342   assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
1343   return !llvm::isGuaranteedToTransferExecutionToSuccessor(
1344       ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
1345 }
1346 
1347 // Only allow these whitelisted attributes to be propagated back to the
1348 // callee. This is because other attributes may only be valid on the call
1349 // itself, i.e. attributes such as signext and zeroext.
1350 
1351 // Attributes that are always okay to propagate because violating them results
1352 // in immediate UB.
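     // For example (an illustrative call site, not from the source): if the call is
     //   %r = call dereferenceable(16) nonnull ptr @callee()
     // and @callee returns the result of an inner call, then after inlining that
     // inner call may receive dereferenceable(16) directly, and nonnull subject to
     // the extra poison-related checks in AddReturnAttributes below.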
1353 static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB) {
1354   AttrBuilder Valid(CB.getContext());
1355   if (auto DerefBytes = CB.getRetDereferenceableBytes())
1356     Valid.addDereferenceableAttr(DerefBytes);
1357   if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
1358     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1359   if (CB.hasRetAttr(Attribute::NoAlias))
1360     Valid.addAttribute(Attribute::NoAlias);
1361   if (CB.hasRetAttr(Attribute::NoUndef))
1362     Valid.addAttribute(Attribute::NoUndef);
1363   return Valid;
1364 }
1365 
1366 // Attributes that need additional checks as propagating them may change
1367 // behavior or cause new UB.
1368 static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB) {
1369   AttrBuilder Valid(CB.getContext());
1370   if (CB.hasRetAttr(Attribute::NonNull))
1371     Valid.addAttribute(Attribute::NonNull);
1372   if (CB.hasRetAttr(Attribute::Alignment))
1373     Valid.addAlignmentAttr(CB.getRetAlign());
1374   return Valid;
1375 }
1376 
1377 static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1378   AttrBuilder ValidUB = IdentifyValidUBGeneratingAttributes(CB);
1379   AttrBuilder ValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
1380   if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
1381     return;
1382   auto *CalledFunction = CB.getCalledFunction();
1383   auto &Context = CalledFunction->getContext();
1384 
1385   for (auto &BB : *CalledFunction) {
1386     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1387     if (!RI || !isa<CallBase>(RI->getOperand(0)))
1388       continue;
1389     auto *RetVal = cast<CallBase>(RI->getOperand(0));
1390     // Check that the cloned RetVal exists and is a call, otherwise we cannot
1391     // add the attributes on the cloned RetVal. Simplification during inlining
1392     // could have transformed the cloned instruction.
1393     auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1394     if (!NewRetVal)
1395       continue;
1396     // Backward propagation of attributes to the returned value may be incorrect
1397     // if it is control flow dependent.
1398     // Consider:
1399     // @callee {
1400     //  %rv = call @foo()
1401     //  %rv2 = call @bar()
1402     //  if (%rv2 != null)
1403     //    return %rv2
1404     //  if (%rv == null)
1405     //    exit()
1406     //  return %rv
1407     // }
1408     // caller() {
1409     //   %val = call nonnull @callee()
1410     // }
1411     // Here we cannot add the nonnull attribute on either foo or bar. So, we
1412     // limit the check to the case where RetVal and RI are in the same basic
1413     // block and there are no throwing/exiting instructions between them.
1414     if (RI->getParent() != RetVal->getParent() ||
1415         MayContainThrowingOrExitingCallAfterCB(RetVal, RI))
1416       continue;
1417     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1418     // instruction.
1419     // NB! When we have the same attribute already existing on NewRetVal, but
1420     // with a differing value, the AttributeList's merge API honours the already
1421     // existing attribute value (i.e. attributes such as dereferenceable,
1422     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1423     AttributeList AL = NewRetVal->getAttributes();
1424     if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
1425       ValidUB.removeAttribute(Attribute::Dereferenceable);
1426     if (ValidUB.getDereferenceableOrNullBytes() <
1427         AL.getRetDereferenceableOrNullBytes())
1428       ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
1429     AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1430     // Attributes that may generate poison returns are a bit tricky. If we
1431     // propagate them, other uses of the callsite might have their behavior
1432     // change or cause UB (if they have noundef) because of the new potential
1433     // poison.
1434     // Take the following three cases:
1435     //
1436     // 1)
1437     // define nonnull ptr @foo() {
1438     //   %p = call ptr @bar()
1439     //   call void @use(ptr %p) willreturn nounwind
1440     //   ret ptr %p
1441     // }
1442     //
1443     // 2)
1444     // define noundef nonnull ptr @foo() {
1445     //   %p = call ptr @bar()
1446     //   call void @use(ptr %p) willreturn nounwind
1447     //   ret ptr %p
1448     // }
1449     //
1450     // 3)
1451     // define nonnull ptr @foo() {
1452     //   %p = call noundef ptr @bar()
1453     //   ret ptr %p
1454     // }
1455     //
1456     // In case 1, we can't propagate nonnull because poison value in @use may
1457     // change behavior or trigger UB.
1458     // In case 2, we don't need to be concerned about propagating nonnull, as
1459     // any new poison at @use will trigger UB anyways.
1460     // In case 3, we can never propagate nonnull because it may create UB due to
1461     // the noundef on @bar.
1462     if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
1463       ValidPG.removeAttribute(Attribute::Alignment);
1464     if (ValidPG.hasAttributes()) {
1465       // Three checks.
1466       // If the callsite has `noundef`, then a poison due to violating the
1467       // return attribute will create UB anyways so we can always propagate.
1468       // Otherwise, if the return value (callee to be inlined) has `noundef`, we
1469       // can't propagate as a new poison return will cause UB.
1470       // Finally, check if the return value has no uses whose behavior may
1471       // change/may cause UB if we potentially return poison. At the moment this
1472       // is implemented overly conservatively with a single-use check.
1473       // TODO: Update the single-use check to iterate through uses and only bail
1474       // if we have a potentially dangerous use.
1475 
1476       if (CB.hasRetAttr(Attribute::NoUndef) ||
1477           (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1478         NewAL = NewAL.addRetAttributes(Context, ValidPG);
1479     }
1480     NewRetVal->setAttributes(NewAL);
1481   }
1482 }
1483 
1484 /// If the inlined function has non-byval align arguments, then
1485 /// add @llvm.assume-based alignment assumptions to preserve this information.
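     /// A rough sketch of the effect (not lifted from a test): if the callee
     /// declares a parameter "ptr align 16 %p", the caller's actual argument %arg
     /// may get an assumption such as
     ///   call void @llvm.assume(i1 true) [ "align"(ptr %arg, i64 16) ]
     /// inserted before the call, unless that alignment is already provable there.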
1486 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1487   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1488     return;
1489 
1490   AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1491   auto &DL = CB.getCaller()->getParent()->getDataLayout();
1492 
1493   // To avoid inserting redundant assumptions, we should check for assumptions
1494   // already in the caller. To do this, we might need a DT of the caller.
1495   DominatorTree DT;
1496   bool DTCalculated = false;
1497 
1498   Function *CalledFunc = CB.getCalledFunction();
1499   for (Argument &Arg : CalledFunc->args()) {
1500     if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1501         Arg.hasNUses(0))
1502       continue;
1503     MaybeAlign Alignment = Arg.getParamAlign();
1504     if (!Alignment)
1505       continue;
1506 
1507     if (!DTCalculated) {
1508       DT.recalculate(*CB.getCaller());
1509       DTCalculated = true;
1510     }
1511     // If we can already prove the asserted alignment in the context of the
1512     // caller, then don't bother inserting the assumption.
1513     Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1514     if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
1515       continue;
1516 
1517     CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
1518         DL, ArgVal, Alignment->value());
1519     AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1520   }
1521 }
1522 
1523 static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1524                                     Module *M, BasicBlock *InsertBlock,
1525                                     InlineFunctionInfo &IFI,
1526                                     Function *CalledFunc) {
1527   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1528 
1529   Value *Size =
1530       Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1531 
1532   // Always generate a memcpy of alignment 1 here because we don't know
1533   // the alignment of the src pointer.  Other optimizations can infer
1534   // better alignment.
1535   CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1536                                       /*SrcAlign*/ Align(1), Size);
1537 
1538   // The verifier requires that all calls of debug-info-bearing functions
1539   // from debug-info-bearing functions have a debug location (for inlining
1540   // purposes). Assign a dummy location to satisfy the constraint.
1541   if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
1542     if (DISubprogram *SP = CalledFunc->getSubprogram())
1543       CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1544 }
1545 
1546 /// When inlining a call site that has a byval argument,
1547 /// we have to make the implicit memcpy explicit by adding it.
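     /// A hand-written sketch (names and sizes invented): for a byval argument of a
     /// 16-byte %struct.S, the caller typically gains
     ///   %byval.tmp = alloca %struct.S, align 8
     ///   call void @llvm.memcpy.p0.p0.i64(ptr %byval.tmp, ptr %arg, i64 16, i1 false)
     /// and the inlined body uses %byval.tmp instead, unless the callee only reads
     /// memory and the copy can be elided.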
1548 static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1549                                   Instruction *TheCall,
1550                                   const Function *CalledFunc,
1551                                   InlineFunctionInfo &IFI,
1552                                   MaybeAlign ByValAlignment) {
1553   Function *Caller = TheCall->getFunction();
1554   const DataLayout &DL = Caller->getParent()->getDataLayout();
1555 
1556   // If the called function is readonly, then it could not mutate the caller's
1557   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1558   // temporary.
1559   if (CalledFunc->onlyReadsMemory()) {
1560     // If the byval argument has a specified alignment that is greater than the
1561     // alignment of the passed-in pointer, then we either have to round up the
1562     // input pointer or give up on this transformation.
1563     if (ByValAlignment.valueOrOne() == 1)
1564       return Arg;
1565 
1566     AssumptionCache *AC =
1567         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1568 
1569     // If the pointer is already known to be sufficiently aligned, or if we can
1570     // round it up to a larger alignment, then we don't need a temporary.
1571     if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
1572         *ByValAlignment)
1573       return Arg;
1574 
1575     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1576     // for code quality, but rarely happens and is required for correctness.
1577   }
1578 
1579   // Create the alloca, using the DataLayout's preferred alignment for the type.
1580   Align Alignment = DL.getPrefTypeAlign(ByValType);
1581 
1582   // If the byval had an alignment specified, we *must* use at least that
1583   // alignment, as it is required by the byval argument (and uses of the
1584   // pointer inside the callee).
1585   if (ByValAlignment)
1586     Alignment = std::max(Alignment, *ByValAlignment);
1587 
1588   AllocaInst *NewAlloca = new AllocaInst(ByValType, DL.getAllocaAddrSpace(),
1589                                          nullptr, Alignment, Arg->getName());
1590   NewAlloca->insertBefore(Caller->begin()->begin());
1591   IFI.StaticAllocas.push_back(NewAlloca);
1592 
1593   // Uses of the argument in the function should use our new alloca
1594   // instead.
1595   return NewAlloca;
1596 }
1597 
1598 // Check whether this Value is used by a lifetime intrinsic.
1599 static bool isUsedByLifetimeMarker(Value *V) {
1600   for (User *U : V->users())
1601     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1602       if (II->isLifetimeStartOrEnd())
1603         return true;
1604   return false;
1605 }
1606 
1607 // Check whether the given alloca already has
1608 // lifetime.start or lifetime.end intrinsics.
1609 static bool hasLifetimeMarkers(AllocaInst *AI) {
1610   Type *Ty = AI->getType();
1611   Type *Int8PtrTy =
1612       PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
1613   if (Ty == Int8PtrTy)
1614     return isUsedByLifetimeMarker(AI);
1615 
1616   // Do a scan to find all the casts to i8*.
1617   for (User *U : AI->users()) {
1618     if (U->getType() != Int8PtrTy) continue;
1619     if (U->stripPointerCasts() != AI) continue;
1620     if (isUsedByLifetimeMarker(U))
1621       return true;
1622   }
1623   return false;
1624 }
1625 
1626 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1627 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1628 /// cannot be static.
1629 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1630   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1631 }
1632 
1633 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1634 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1635 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1636                                LLVMContext &Ctx,
1637                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1638   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1639   return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1640                          OrigDL.getScope(), IA);
1641 }
1642 
1643 /// Update inlined instructions' line numbers to encode the location where
1644 /// these instructions are inlined.
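     /// Illustrative sketch (metadata numbers invented): an inlined instruction that
     /// carried "!dbg !10" ends up with a cloned location whose inlined-at chain
     /// points at the call site, e.g.
     ///   !10 = !DILocation(line: 7, column: 3, scope: !4, inlinedAt: !20)
     ///   !20 = distinct !DILocation(line: 42, column: 5, scope: !15)
     /// where !20 describes the original call in the caller.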
1645 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1646                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1647   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1648   if (!TheCallDL)
1649     return;
1650 
1651   auto &Ctx = Fn->getContext();
1652   DILocation *InlinedAtNode = TheCallDL;
1653 
1654   // Create a unique call site, not to be confused with any other call from the
1655   // same location.
1656   InlinedAtNode = DILocation::getDistinct(
1657       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1658       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1659 
1660   // Cache the inlined-at nodes as they're built so they are reused; without
1661   // this, every instruction's inlined-at chain would become distinct from the
1662   // others.
1663   DenseMap<const MDNode *, MDNode *> IANodes;
1664 
1665   // Check if we are not generating inline line tables and want to use
1666   // the call site location instead.
1667   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1668 
1669   // Helper-util for updating the metadata attached to an instruction.
1670   auto UpdateInst = [&](Instruction &I) {
1671     // Loop metadata needs to be updated so that the start and end locs
1672     // reference inlined-at locations.
1673     auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1674                               &IANodes](Metadata *MD) -> Metadata * {
1675       if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1676         return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1677       return MD;
1678     };
1679     updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
1680 
1681     if (!NoInlineLineTables)
1682       if (DebugLoc DL = I.getDebugLoc()) {
1683         DebugLoc IDL =
1684             inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
1685         I.setDebugLoc(IDL);
1686         return;
1687       }
1688 
1689     if (CalleeHasDebugInfo && !NoInlineLineTables)
1690       return;
1691 
1692     // If the inlined instruction has no line number, or if inline info
1693     // is not being generated, make it look as if it originates from the call
1694     // location. This is important for ((__always_inline, __nodebug__))
1695     // functions which must use caller location for all instructions in their
1696     // function body.
1697 
1698     // Don't update static allocas, as they may get moved later.
1699     if (auto *AI = dyn_cast<AllocaInst>(&I))
1700       if (allocaWouldBeStaticInEntry(AI))
1701         return;
1702 
1703     // Do not force a debug loc for pseudo probes, since they do not need to
1704     // be debuggable, and also they are expected to have a zero/null dwarf
1705     // discriminator at this point which could be violated otherwise.
1706     if (isa<PseudoProbeInst>(I))
1707       return;
1708 
1709     I.setDebugLoc(TheCallDL);
1710   };
1711 
1712   // Helper-util for updating debug-info records attached to instructions.
1713   auto UpdateDPV = [&](DPValue *DPV) {
1714     assert(DPV->getDebugLoc() && "Debug Value must have debug loc");
1715     if (NoInlineLineTables) {
1716       DPV->setDebugLoc(TheCallDL);
1717       return;
1718     }
1719     DebugLoc DL = DPV->getDebugLoc();
1720     DebugLoc IDL =
1721         inlineDebugLoc(DL, InlinedAtNode,
1722                        DPV->getMarker()->getParent()->getContext(), IANodes);
1723     DPV->setDebugLoc(IDL);
1724   };
1725 
1726   // Iterate over all instructions, updating metadata and debug-info records.
1727   for (; FI != Fn->end(); ++FI) {
1728     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
1729          ++BI) {
1730       UpdateInst(*BI);
1731       for (DPValue &DPV : BI->getDbgValueRange()) {
1732         UpdateDPV(&DPV);
1733       }
1734     }
1735 
1736     // Remove debug info intrinsics if we're not keeping inline info.
1737     if (NoInlineLineTables) {
1738       BasicBlock::iterator BI = FI->begin();
1739       while (BI != FI->end()) {
1740         if (isa<DbgInfoIntrinsic>(BI)) {
1741           BI = BI->eraseFromParent();
1742           continue;
1743         } else {
1744           BI->dropDbgValues();
1745         }
1746         ++BI;
1747       }
1748     }
1749   }
1750 }
1751 
1752 #undef DEBUG_TYPE
1753 #define DEBUG_TYPE "assignment-tracking"
1754 /// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
1755 static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL,
1756                                                  const CallBase &CB) {
1757   at::StorageToVarsMap EscapedLocals;
1758   SmallPtrSet<const Value *, 4> SeenBases;
1759 
1760   LLVM_DEBUG(
1761       errs() << "# Finding caller local variables escaped by callee\n");
1762   for (const Value *Arg : CB.args()) {
1763     LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
1764     if (!Arg->getType()->isPointerTy()) {
1765       LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
1766       continue;
1767     }
1768 
1769     const Instruction *I = dyn_cast<Instruction>(Arg);
1770     if (!I) {
1771       LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
1772       continue;
1773     }
1774 
1775     // Walk back to the base storage.
1776     assert(Arg->getType()->isPtrOrPtrVectorTy());
1777     APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
1778     const AllocaInst *Base = dyn_cast<AllocaInst>(
1779         Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
1780     if (!Base) {
1781       LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
1782       continue;
1783     }
1784 
1785     assert(Base);
1786     LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
1787     // We only need to process each base address once - skip any duplicates.
1788     if (!SeenBases.insert(Base).second)
1789       continue;
1790 
1791     // Find all local variables associated with the backing storage.
1792     for (auto *DAI : at::getAssignmentMarkers(Base)) {
1793       // Skip variables from inlined functions - they are not local variables.
1794       if (DAI->getDebugLoc().getInlinedAt())
1795         continue;
1796       LLVM_DEBUG(errs() << " > DEF : " << *DAI << "\n");
1797       EscapedLocals[Base].insert(at::VarRecord(DAI));
1798     }
1799   }
1800   return EscapedLocals;
1801 }
1802 
1803 static void trackInlinedStores(Function::iterator Start, Function::iterator End,
1804                                const CallBase &CB) {
1805   LLVM_DEBUG(errs() << "trackInlinedStores into "
1806                     << Start->getParent()->getName() << " from "
1807                     << CB.getCalledFunction()->getName() << "\n");
1808   std::unique_ptr<DataLayout> DL = std::make_unique<DataLayout>(CB.getModule());
1809   at::trackAssignments(Start, End, collectEscapedLocals(*DL, CB), *DL);
1810 }
1811 
1812 /// Update inlined instructions' DIAssignID metadata. We need to do this
1813 /// because otherwise a function inlined more than once into the same function
1814 /// would cause DIAssignID to be shared by many instructions.
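     /// Sketch of the rewrite (metadata numbers invented): a cloned store carrying
     ///   store i32 %v, ptr %p, !DIAssignID !30
     /// gets a fresh distinct node, e.g. !DIAssignID !31, and the linked
     /// llvm.dbg.assign intrinsic is updated to reference the same new node.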
1815 static void fixupAssignments(Function::iterator Start, Function::iterator End) {
1816   // Map {Old, New} metadata. Not used directly - use GetNewID.
1817   DenseMap<DIAssignID *, DIAssignID *> Map;
1818   auto GetNewID = [&Map](Metadata *Old) {
1819     DIAssignID *OldID = cast<DIAssignID>(Old);
1820     if (DIAssignID *NewID = Map.lookup(OldID))
1821       return NewID;
1822     DIAssignID *NewID = DIAssignID::getDistinct(OldID->getContext());
1823     Map[OldID] = NewID;
1824     return NewID;
1825   };
1826   // Loop over all the inlined instructions. If we find a DIAssignID
1827   // attachment or use, replace it with a new version.
1828   for (auto BBI = Start; BBI != End; ++BBI) {
1829     for (Instruction &I : *BBI) {
1830       if (auto *ID = I.getMetadata(LLVMContext::MD_DIAssignID))
1831         I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(ID));
1832       else if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
1833         DAI->setAssignId(GetNewID(DAI->getAssignID()));
1834     }
1835   }
1836 }
1837 #undef DEBUG_TYPE
1838 #define DEBUG_TYPE "inline-function"
1839 
1840 /// Update the block frequencies of the caller after a callee has been inlined.
1841 ///
1842 /// Each block cloned into the caller has its block frequency scaled by the
1843 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1844 /// callee's entry block gets the same frequency as the callsite block and the
1845 /// relative frequencies of all cloned blocks remain the same after cloning.
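     /// A small worked example (numbers invented): with a callee entry frequency of
     /// 200 and a call site frequency of 50, every cloned block is scaled by 50/200,
     /// so a callee block with frequency 400 gets frequency 100 in the caller.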
1846 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1847                             const ValueToValueMapTy &VMap,
1848                             BlockFrequencyInfo *CallerBFI,
1849                             BlockFrequencyInfo *CalleeBFI,
1850                             const BasicBlock &CalleeEntryBlock) {
1851   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1852   for (auto Entry : VMap) {
1853     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1854       continue;
1855     auto *OrigBB = cast<BasicBlock>(Entry.first);
1856     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1857     BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
1858     if (!ClonedBBs.insert(ClonedBB).second) {
1859       // Multiple blocks in the callee might get mapped to one cloned block in
1860       // the caller since we prune the callee as we clone it. When that happens,
1861       // we want to use the maximum among the original blocks' frequencies.
1862       BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
1863       if (NewFreq > Freq)
1864         Freq = NewFreq;
1865     }
1866     CallerBFI->setBlockFreq(ClonedBB, Freq);
1867   }
1868   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1869   CallerBFI->setBlockFreqAndScale(
1870       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
1871 }
1872 
1873 /// Update the branch metadata for cloned call instructions.
1874 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1875                               const ProfileCount &CalleeEntryCount,
1876                               const CallBase &TheCall, ProfileSummaryInfo *PSI,
1877                               BlockFrequencyInfo *CallerBFI) {
1878   if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
1879     return;
1880   auto CallSiteCount =
1881       PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
1882   int64_t CallCount =
1883       std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
1884   updateProfileCallee(Callee, -CallCount, &VMap);
1885 }
1886 
1887 void llvm::updateProfileCallee(
1888     Function *Callee, int64_t EntryDelta,
1889     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1890   auto CalleeCount = Callee->getEntryCount();
1891   if (!CalleeCount)
1892     return;
1893 
1894   const uint64_t PriorEntryCount = CalleeCount->getCount();
1895 
1896   // Since CallSiteCount is an estimate, it could exceed the original callee
1897   // count; clamp the result to 0 in that case to guard against underflow.
1898   const uint64_t NewEntryCount =
1899       (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1900           ? 0
1901           : PriorEntryCount + EntryDelta;
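       // E.g. (invented numbers): PriorEntryCount = 100 with EntryDelta = -150 would
       // underflow, so the new entry count is clamped to 0.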
1902 
1903   // Are we in the middle of inlining (i.e. was a value map provided)?
1904   if (VMap) {
1905     uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1906     for (auto Entry : *VMap)
1907       if (isa<CallInst>(Entry.first))
1908         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1909           CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1910   }
1911 
1912   if (EntryDelta) {
1913     Callee->setEntryCount(NewEntryCount);
1914 
1915     for (BasicBlock &BB : *Callee)
1916       // No need to update the callsite if it is pruned during inlining.
1917       if (!VMap || VMap->count(&BB))
1918         for (Instruction &I : BB)
1919           if (CallInst *CI = dyn_cast<CallInst>(&I))
1920             CI->updateProfWeight(NewEntryCount, PriorEntryCount);
1921   }
1922 }
1923 
1924 /// An operand bundle "clang.arc.attachedcall" on a call indicates the call
1925 /// result is implicitly consumed by a call to retainRV or claimRV immediately
1926 /// after the call. This function inlines the retainRV/claimRV calls.
1927 ///
1928 /// There are three cases to consider:
1929 ///
1930 /// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
1931 ///    object in the callee return block, the autoreleaseRV call and the
1932 ///    retainRV/claimRV call in the caller cancel out. If the call in the caller
1933 ///    is a claimRV call, a call to objc_release is emitted.
1934 ///
1935 /// 2. If there is a call in the callee return block that doesn't have operand
1936 ///    bundle "clang.arc.attachedcall", the operand bundle on the original call
1937 ///    is transferred to the call in the callee.
1938 ///
1939 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
1940 ///    a retainRV call.
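     /// For reference, an illustrative call site (not from the source):
     ///   %r = call ptr @f() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
     /// Case 2 above transfers that bundle onto the matching call inside the callee.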
1941 static void
1942 inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
1943                            const SmallVectorImpl<ReturnInst *> &Returns) {
1944   Module *Mod = CB.getModule();
1945   assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
1946   bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
1947        IsUnsafeClaimRV = !IsRetainRV;
1948 
1949   for (auto *RI : Returns) {
1950     Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
1951     bool InsertRetainCall = IsRetainRV;
1952     IRBuilder<> Builder(RI->getContext());
1953 
1954     // Walk backwards through the basic block looking for either a matching
1955     // autoreleaseRV call or an unannotated call.
1956     auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
1957                                       RI->getParent()->rend());
1958     for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
1959       // Ignore casts.
1960       if (isa<CastInst>(I))
1961         continue;
1962 
1963       if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1964         if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
1965             !II->hasNUses(0) ||
1966             objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
1967           break;
1968 
1969         // If we've found a matching autoreleaseRV call:
1970         // - If claimRV is attached to the call, insert a call to objc_release
1971         //   and erase the autoreleaseRV call.
1972         // - If retainRV is attached to the call, just erase the autoreleaseRV
1973         //   call.
1974         if (IsUnsafeClaimRV) {
1975           Builder.SetInsertPoint(II);
1976           Function *IFn =
1977               Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
1978           Builder.CreateCall(IFn, RetOpnd, "");
1979         }
1980         II->eraseFromParent();
1981         InsertRetainCall = false;
1982         break;
1983       }
1984 
1985       auto *CI = dyn_cast<CallInst>(&I);
1986 
1987       if (!CI)
1988         break;
1989 
1990       if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
1991           objcarc::hasAttachedCallOpBundle(CI))
1992         break;
1993 
1994       // If we've found an unannotated call that defines RetOpnd, add a
1995       // "clang.arc.attachedcall" operand bundle.
1996       Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
1997       OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
1998       auto *NewCall = CallBase::addOperandBundle(
1999           CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
2000       NewCall->copyMetadata(*CI);
2001       CI->replaceAllUsesWith(NewCall);
2002       CI->eraseFromParent();
2003       InsertRetainCall = false;
2004       break;
2005     }
2006 
2007     if (InsertRetainCall) {
2008       // The retainRV is attached to the call and we've failed to find a
2009       // matching autoreleaseRV or an annotated call in the callee. Emit a call
2010       // to objc_retain.
2011       Builder.SetInsertPoint(RI);
2012       Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
2013       Builder.CreateCall(IFn, RetOpnd, "");
2014     }
2015   }
2016 }
2017 
2018 /// This function inlines the called function into the basic block of the
2019 /// caller. This returns false if it is not possible to inline this call.
2020 /// The program is still in a well defined state if this occurs though.
2021 ///
2022 /// Note that this only does one level of inlining.  For example, if the
2023 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2024 /// exists in the instruction stream.  Similarly this will inline a recursive
2025 /// function by one level.
2026 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
2027                                         bool MergeAttributes,
2028                                         AAResults *CalleeAAR,
2029                                         bool InsertLifetime,
2030                                         Function *ForwardVarArgsTo) {
2031   assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
2032 
2033   // FIXME: we don't inline callbr yet.
2034   if (isa<CallBrInst>(CB))
2035     return InlineResult::failure("We don't inline callbr yet.");
2036 
2037   // If IFI has any state in it, zap it before we fill it in.
2038   IFI.reset();
2039 
2040   Function *CalledFunc = CB.getCalledFunction();
2041   if (!CalledFunc ||               // Can't inline external function or indirect
2042       CalledFunc->isDeclaration()) // call!
2043     return InlineResult::failure("external or indirect");
2044 
2045   // The inliner does not know how to inline through calls with operand bundles
2046   // in general ...
2047   Value *ConvergenceControlToken = nullptr;
2048   if (CB.hasOperandBundles()) {
2049     for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2050       auto OBUse = CB.getOperandBundleAt(i);
2051       uint32_t Tag = OBUse.getTagID();
2052       // ... but it knows how to inline through "deopt" operand bundles ...
2053       if (Tag == LLVMContext::OB_deopt)
2054         continue;
2055       // ... and "funclet" operand bundles.
2056       if (Tag == LLVMContext::OB_funclet)
2057         continue;
2058       if (Tag == LLVMContext::OB_clang_arc_attachedcall)
2059         continue;
2060       if (Tag == LLVMContext::OB_kcfi)
2061         continue;
2062       if (Tag == LLVMContext::OB_convergencectrl) {
2063         ConvergenceControlToken = OBUse.Inputs[0].get();
2064         continue;
2065       }
2066 
2067       return InlineResult::failure("unsupported operand bundle");
2068     }
2069   }
2070 
2071   // FIXME: The check below is redundant and incomplete. According to spec, if a
2072   // convergent call is missing a token, then the caller is using uncontrolled
2073   // convergence. If the callee has an entry intrinsic, then the callee is using
2074   // controlled convergence, and the call cannot be inlined. A proper
2075   // implementation of this check requires a whole new analysis that identifies
2076   // convergence in every function. For now, we skip that and just do this one
2077   // cursory check. The underlying assumption is that in a compiler flow that
2078   // fully implements convergence control tokens, there is no mixing of
2079   // controlled and uncontrolled convergent operations in the whole program.
2080   if (CB.isConvergent()) {
2081     auto *I = CalledFunc->getEntryBlock().getFirstNonPHI();
2082     if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2083       if (IntrinsicCall->getIntrinsicID() ==
2084           Intrinsic::experimental_convergence_entry) {
2085         if (!ConvergenceControlToken) {
2086           return InlineResult::failure(
2087               "convergent call needs convergencectrl operand");
2088         }
2089       }
2090     }
2091   }
2092 
2093   // If the call to the callee cannot throw, set the 'nounwind' flag on any
2094   // calls that we inline.
2095   bool MarkNoUnwind = CB.doesNotThrow();
2096 
2097   BasicBlock *OrigBB = CB.getParent();
2098   Function *Caller = OrigBB->getParent();
2099 
2100   // Do not inline a strictfp function into a non-strictfp one. It would require
2101   // converting all FP operations in the host function to constrained intrinsics.
2102   if (CalledFunc->getAttributes().hasFnAttr(Attribute::StrictFP) &&
2103       !Caller->getAttributes().hasFnAttr(Attribute::StrictFP)) {
2104     return InlineResult::failure("incompatible strictfp attributes");
2105   }
2106 
2107   // GC poses two hazards to inlining, which only occur when the callee has GC:
2108   //  1. If the caller has no GC, then the callee's GC must be propagated to the
2109   //     caller.
2110   //  2. If the caller has a differing GC, it is invalid to inline.
2111   if (CalledFunc->hasGC()) {
2112     if (!Caller->hasGC())
2113       Caller->setGC(CalledFunc->getGC());
2114     else if (CalledFunc->getGC() != Caller->getGC())
2115       return InlineResult::failure("incompatible GC");
2116   }
2117 
2118   // Get the personality function from the callee if it contains a landing pad.
2119   Constant *CalledPersonality =
2120       CalledFunc->hasPersonalityFn()
2121           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2122           : nullptr;
2123 
2124   // Find the personality function used by the landing pads of the caller. If it
2125   // exists, then check to see that it matches the personality function used in
2126   // the callee.
2127   Constant *CallerPersonality =
2128       Caller->hasPersonalityFn()
2129           ? Caller->getPersonalityFn()->stripPointerCasts()
2130           : nullptr;
2131   if (CalledPersonality) {
2132     if (!CallerPersonality)
2133       Caller->setPersonalityFn(CalledPersonality);
2134     // If the personality functions match, then we can perform the
2135     // inlining. Otherwise, we can't inline.
2136     // TODO: This isn't 100% true. Some personality functions are proper
2137     //       supersets of others and can be used in place of the other.
2138     else if (CalledPersonality != CallerPersonality)
2139       return InlineResult::failure("incompatible personality");
2140   }
2141 
2142   // We need to figure out which funclet the callsite was in so that we may
2143   // properly nest the callee.
2144   Instruction *CallSiteEHPad = nullptr;
2145   if (CallerPersonality) {
2146     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
2147     if (isScopedEHPersonality(Personality)) {
2148       std::optional<OperandBundleUse> ParentFunclet =
2149           CB.getOperandBundle(LLVMContext::OB_funclet);
2150       if (ParentFunclet)
2151         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2152 
2153       // OK, the inlining site is legal.  What about the target function?
2154 
2155       if (CallSiteEHPad) {
2156         if (Personality == EHPersonality::MSVC_CXX) {
2157           // The MSVC personality cannot tolerate catches getting inlined into
2158           // cleanup funclets.
2159           if (isa<CleanupPadInst>(CallSiteEHPad)) {
2160             // Ok, the call site is within a cleanuppad.  Let's check the callee
2161             // for catchpads.
2162             for (const BasicBlock &CalledBB : *CalledFunc) {
2163               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2164                 return InlineResult::failure("catch in cleanup funclet");
2165             }
2166           }
2167         } else if (isAsynchronousEHPersonality(Personality)) {
2168           // SEH is even less tolerant; there may not be any sort of exceptional
2169           // funclet in the callee.
2170           for (const BasicBlock &CalledBB : *CalledFunc) {
2171             if (CalledBB.isEHPad())
2172               return InlineResult::failure("SEH in cleanup funclet");
2173           }
2174         }
2175       }
2176     }
2177   }
2178 
2179   // Determine if we are dealing with a call in an EHPad which does not unwind
2180   // to the caller.
2181   bool EHPadForCallUnwindsLocally = false;
2182   if (CallSiteEHPad && isa<CallInst>(CB)) {
2183     UnwindDestMemoTy FuncletUnwindMap;
2184     Value *CallSiteUnwindDestToken =
2185         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
2186 
2187     EHPadForCallUnwindsLocally =
2188         CallSiteUnwindDestToken &&
2189         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2190   }
2191 
2192   // Get an iterator to the last basic block in the function, which will have
2193   // the new function inlined after it.
2194   Function::iterator LastBlock = --Caller->end();
2195 
2196   // Make sure to capture all of the return instructions from the cloned
2197   // function.
2198   SmallVector<ReturnInst*, 8> Returns;
2199   ClonedCodeInfo InlinedFunctionInfo;
2200   Function::iterator FirstNewBlock;
2201 
2202   { // Scope to destroy VMap after cloning.
2203     ValueToValueMapTy VMap;
2204     struct ByValInit {
2205       Value *Dst;
2206       Value *Src;
2207       Type *Ty;
2208     };
2209     // Keep a list of (dst, src, type) entries used to emit byval initializations.
2210     SmallVector<ByValInit, 4> ByValInits;
2211 
2212     // When inlining a function that contains noalias scope metadata,
2213     // this metadata needs to be cloned so that the inlined blocks
2214     // have different "unique scopes" at every call site.
2215     // Track the metadata that must be cloned. Do this before other changes to
2216     // the function, so that we do not get in trouble when inlining caller ==
2217     // callee.
2218     ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2219 
2220     auto &DL = Caller->getParent()->getDataLayout();
2221 
2222     // Calculate the vector of arguments to pass into the function cloner, which
2223     // matches up the formal to the actual argument values.
2224     auto AI = CB.arg_begin();
2225     unsigned ArgNo = 0;
2226     for (Function::arg_iterator I = CalledFunc->arg_begin(),
2227          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
2228       Value *ActualArg = *AI;
2229 
2230       // When byval arguments are actually inlined, we need to make the copy implied
2231       // by them explicit.  However, we don't do this if the callee is readonly
2232       // or readnone, because the copy would be unneeded: the callee doesn't
2233       // modify the struct.
2234       if (CB.isByValArgument(ArgNo)) {
2235         ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2236                                         &CB, CalledFunc, IFI,
2237                                         CalledFunc->getParamAlign(ArgNo));
2238         if (ActualArg != *AI)
2239           ByValInits.push_back(
2240               {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
2241       }
2242 
2243       VMap[&*I] = ActualArg;
2244     }
2245 
2246     // TODO: Remove this when users have been updated to the assume bundles.
2247     // Add alignment assumptions if necessary. We do this before the inlined
2248     // instructions are actually cloned into the caller so that we can easily
2249     // check what will be known at the start of the inlined code.
2250     AddAlignmentAssumptions(CB, IFI);
2251 
2252     AssumptionCache *AC =
2253         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2254 
2255     /// Preserve all attributes of the call and its parameters.
2256     salvageKnowledge(&CB, AC);
2257 
2258     // We want the inliner to prune the code as it copies.  We would LOVE to
2259     // have no dead or constant instructions leftover after inlining occurs
2260     // (which can happen, e.g., because an argument was constant), but we'll be
2261     // happy with whatever the cloner can do.
2262     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2263                               /*ModuleLevelChanges=*/false, Returns, ".i",
2264                               &InlinedFunctionInfo);
2265     // Remember the first block that is newly cloned over.
2266     FirstNewBlock = LastBlock; ++FirstNewBlock;
2267 
2268     // Insert retainRV/claimRV runtime calls.
2269     objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
2270     if (RVCallKind != objcarc::ARCInstKind::None)
2271       inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2272 
2273     // Update caller/callee profiles only when requested. For sample loader
2274     // inlining, the context-sensitive inlinee profile doesn't need to be
2275     // subtracted from callee profile, and the inlined clone also doesn't need
2276     // to be scaled based on call site count.
2277     if (IFI.UpdateProfile) {
2278       if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2279         // Update the BFI of blocks cloned into the caller.
2280         updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2281                         CalledFunc->front());
2282 
2283       if (auto Profile = CalledFunc->getEntryCount())
2284         updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2285                           IFI.CallerBFI);
2286     }
2287 
2288     // Inject byval arguments initialization.
2289     for (ByValInit &Init : ByValInits)
2290       HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
2291                               &*FirstNewBlock, IFI, CalledFunc);
2292 
2293     std::optional<OperandBundleUse> ParentDeopt =
2294         CB.getOperandBundle(LLVMContext::OB_deopt);
2295     if (ParentDeopt) {
2296       SmallVector<OperandBundleDef, 2> OpDefs;
2297 
2298       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2299         CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2300         if (!ICS)
2301           continue; // instruction was DCE'd or RAUW'ed to undef
2302 
2303         OpDefs.clear();
2304 
2305         OpDefs.reserve(ICS->getNumOperandBundles());
2306 
2307         for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2308              ++COBi) {
2309           auto ChildOB = ICS->getOperandBundleAt(COBi);
2310           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2311             // If the inlined call has other operand bundles, let them be
2312             OpDefs.emplace_back(ChildOB);
2313             continue;
2314           }
2315 
2316           // It may be useful to separate this logic (of handling operand
2317           // bundles) out to a separate "policy" component if this gets crowded.
2318           // Prepend the parent's deoptimization continuation to the newly
2319           // inlined call's deoptimization continuation.
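               // For instance (made-up operands): a parent "deopt"(i32 1, i32 2)
               // merged with an inlined call's "deopt"(i32 3) produces
               // "deopt"(i32 1, i32 2, i32 3) on the rewritten call.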
2320           std::vector<Value *> MergedDeoptArgs;
2321           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2322                                   ChildOB.Inputs.size());
2323 
2324           llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2325           llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2326 
2327           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2328         }
2329 
2330         Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
2331 
2332         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2333         // this even if the call returns void.
2334         ICS->replaceAllUsesWith(NewI);
2335 
2336         VH = nullptr;
2337         ICS->eraseFromParent();
2338       }
2339     }
2340 
2341     // For 'nodebug' functions, the associated DISubprogram is always null.
2342     // Conservatively avoid propagating the callsite debug location to
2343     // instructions inlined from a function whose DISubprogram is not null.
2344     fixupLineNumbers(Caller, FirstNewBlock, &CB,
2345                      CalledFunc->getSubprogram() != nullptr);
2346 
2347     if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2348       // Interpret inlined stores to caller-local variables as assignments.
2349       trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2350 
2351       // Update DIAssignID metadata attachments and uses so that they are
2352       // unique to this inlined instance.
2353       fixupAssignments(FirstNewBlock, Caller->end());
2354     }
2355 
2356     // Now clone the inlined noalias scope metadata.
2357     SAMetadataCloner.clone();
2358     SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2359 
2360     // Add noalias metadata if necessary.
2361     AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2362 
2363     // Clone return attributes on the callsite into the calls within the inlined
2364     // function which feed into its return value.
2365     AddReturnAttributes(CB, VMap);
2366 
2367     propagateMemProfMetadata(CalledFunc, CB,
2368                              InlinedFunctionInfo.ContainsMemProfMetadata, VMap);
2369 
2370     // Propagate metadata on the callsite if necessary.
2371     PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2372 
2373     // Register any cloned assumptions.
2374     if (IFI.GetAssumptionCache)
2375       for (BasicBlock &NewBlock :
2376            make_range(FirstNewBlock->getIterator(), Caller->end()))
2377         for (Instruction &I : NewBlock)
2378           if (auto *II = dyn_cast<AssumeInst>(&I))
2379             IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2380   }
2381 
2382   if (ConvergenceControlToken) {
2383     auto *I = FirstNewBlock->getFirstNonPHI();
2384     if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2385       if (IntrinsicCall->getIntrinsicID() ==
2386           Intrinsic::experimental_convergence_entry) {
2387         IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2388         IntrinsicCall->eraseFromParent();
2389       }
2390     }
2391   }
2392 
2393   // If there are any alloca instructions in the block that used to be the entry
2394   // block for the callee, move them to the entry block of the caller.  First
2395   // calculate which instruction they should be inserted before.  We insert the
2396   // instructions at the end of the current alloca list.
2397   {
2398     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2399     for (BasicBlock::iterator I = FirstNewBlock->begin(),
2400          E = FirstNewBlock->end(); I != E; ) {
2401       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2402       if (!AI) continue;
2403 
2404       // If the alloca is now dead, remove it.  This often occurs due to code
2405       // specialization.
2406       if (AI->use_empty()) {
2407         AI->eraseFromParent();
2408         continue;
2409       }
2410 
2411       if (!allocaWouldBeStaticInEntry(AI))
2412         continue;
2413 
2414       // Keep track of the static allocas that we inline into the caller.
2415       IFI.StaticAllocas.push_back(AI);
2416 
2417       // Scan for the block of allocas that we can move over, and move them
2418       // all at once.
2419       while (isa<AllocaInst>(I) &&
2420              !cast<AllocaInst>(I)->use_empty() &&
2421              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2422         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2423         ++I;
2424       }
2425 
2426       // Transfer all of the allocas over in a block.  Using splice means
2427       // that the instructions aren't removed from the symbol table, then
2428       // reinserted.
2429       I.setTailBit(true);
2430       Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2431                                      AI->getIterator(), I);
2432     }
2433   }
2434 
2435   SmallVector<Value*,4> VarArgsToForward;
2436   SmallVector<AttributeSet, 4> VarArgsAttrs;
2437   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2438        i < CB.arg_size(); i++) {
2439     VarArgsToForward.push_back(CB.getArgOperand(i));
2440     VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2441   }
2442 
2443   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2444   if (InlinedFunctionInfo.ContainsCalls) {
2445     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2446     if (CallInst *CI = dyn_cast<CallInst>(&CB))
2447       CallSiteTailKind = CI->getTailCallKind();
2448 
2449     // For inlining purposes, the "notail" marker is the same as no marker.
2450     if (CallSiteTailKind == CallInst::TCK_NoTail)
2451       CallSiteTailKind = CallInst::TCK_None;
2452 
2453     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2454          ++BB) {
2455       for (Instruction &I : llvm::make_early_inc_range(*BB)) {
2456         CallInst *CI = dyn_cast<CallInst>(&I);
2457         if (!CI)
2458           continue;
2459 
2460         // Forward varargs from inlined call site to calls to the
2461         // ForwardVarArgsTo function, if requested, and to musttail calls.
2462         if (!VarArgsToForward.empty() &&
2463             ((ForwardVarArgsTo &&
2464               CI->getCalledFunction() == ForwardVarArgsTo) ||
2465              CI->isMustTailCall())) {
2466           // Collect attributes for non-vararg parameters.
2467           AttributeList Attrs = CI->getAttributes();
2468           SmallVector<AttributeSet, 8> ArgAttrs;
2469           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2470             for (unsigned ArgNo = 0;
2471                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2472               ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2473           }
2474 
2475           // Add VarArg attributes.
2476           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2477           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2478                                      Attrs.getRetAttrs(), ArgAttrs);
2479           // Add VarArgs to existing parameters.
2480           SmallVector<Value *, 6> Params(CI->args());
2481           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2482           CallInst *NewCI = CallInst::Create(
2483               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2484           NewCI->setDebugLoc(CI->getDebugLoc());
2485           NewCI->setAttributes(Attrs);
2486           NewCI->setCallingConv(CI->getCallingConv());
2487           CI->replaceAllUsesWith(NewCI);
2488           CI->eraseFromParent();
2489           CI = NewCI;
2490         }
2491 
2492         if (Function *F = CI->getCalledFunction())
2493           InlinedDeoptimizeCalls |=
2494               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2495 
2496         // We need to reduce the strength of any inlined tail calls.  For
2497         // musttail, we have to avoid introducing potential unbounded stack
2498         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2499         // with musttail, we can inline 'g' into 'f' so long as we preserve
2500         // musttail on the cloned call to 'f'.  If either the inlined call site
2501         // or the cloned call site is *not* musttail, the program already has
2502         // one frame of stack growth, so it's safe to remove musttail.  Here is
2503         // a table of example transformations:
2504         //
2505         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2506         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2507         //    f ->          g -> musttail f  ==>  f ->          f
2508         //    f ->          g ->     tail f  ==>  f ->          f
2509         //
2510         // Inlined notail calls should remain notail calls.
2511         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2512         if (ChildTCK != CallInst::TCK_NoTail)
2513           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2514         CI->setTailCallKind(ChildTCK);
2515         InlinedMustTailCalls |= CI->isMustTailCall();
2516 
2517         // Call sites inlined through a 'nounwind' call site should be
2518         // 'nounwind' as well. However, avoid marking call sites explicitly
2519         // where possible. This helps expose more opportunities for CSE after
2520         // inlining, commonly when the callee is an intrinsic.
2521         if (MarkNoUnwind && !CI->doesNotThrow())
2522           CI->setDoesNotThrow();
2523       }
2524     }
2525   }
2526 
2527   // Leave lifetime markers for the static allocas, scoping them to the
2528   // function we just inlined.
2529   // We need to insert lifetime intrinsics even at O0 to avoid invalid
2530   // accesses caused by multithreaded coroutines. The check
2531   // `Caller->isPresplitCoroutine()` only matters for the AlwaysInliner at O0.
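  // Sketch of the intended result (size and name invented): for a static
  // `%buf = alloca [64 x i8]`, the code below emits roughly
  //   call void @llvm.lifetime.start.p0(i64 64, ptr %buf)
  //   ...inlined body...
  //   call void @llvm.lifetime.end.p0(i64 64, ptr %buf)
  // with an end marker before each cloned return, except returns directly
  // preceded by musttail or deoptimize calls, where the return itself already
  // kills the local allocas.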
2532   if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2533       !IFI.StaticAllocas.empty()) {
2534     IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2535     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2536       AllocaInst *AI = IFI.StaticAllocas[ai];
2537       // Don't mark swifterror allocas. They can't have bitcast uses.
2538       if (AI->isSwiftError())
2539         continue;
2540 
2541       // If the alloca is already scoped to something smaller than the whole
2542       // function then there's no need to add redundant, less accurate markers.
2543       if (hasLifetimeMarkers(AI))
2544         continue;
2545 
2546       // Try to determine the size of the allocation.
2547       ConstantInt *AllocaSize = nullptr;
2548       if (ConstantInt *AIArraySize =
2549           dyn_cast<ConstantInt>(AI->getArraySize())) {
2550         auto &DL = Caller->getParent()->getDataLayout();
2551         Type *AllocaType = AI->getAllocatedType();
2552         TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2553         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2554 
2555         // Don't add markers for zero-sized allocas.
2556         if (AllocaArraySize == 0)
2557           continue;
2558 
2559         // Check that array size doesn't saturate uint64_t and doesn't
2560         // overflow when it's multiplied by type size.
2561         if (!AllocaTypeSize.isScalable() &&
2562             AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2563             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2564                 AllocaTypeSize.getFixedValue()) {
2565           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2566                                         AllocaArraySize * AllocaTypeSize);
2567         }
2568       }
2569 
2570       builder.CreateLifetimeStart(AI, AllocaSize);
2571       for (ReturnInst *RI : Returns) {
2572         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2573         // call and a return.  The return kills all local allocas.
2574         if (InlinedMustTailCalls &&
2575             RI->getParent()->getTerminatingMustTailCall())
2576           continue;
2577         if (InlinedDeoptimizeCalls &&
2578             RI->getParent()->getTerminatingDeoptimizeCall())
2579           continue;
2580         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2581       }
2582     }
2583   }
2584 
2585   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2586   // code with llvm.stacksave/llvm.stackrestore intrinsics.
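  // Illustrative shape of the rewritten code (intrinsic mangling shown without
  // address-space suffixes):
  //   %savedstack = call ptr @llvm.stacksave()
  //   ...inlined body containing dynamic allocas...
  //   call void @llvm.stackrestore(ptr %savedstack)  ; before each cloned return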
2587   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2588     // Insert the llvm.stacksave.
2589     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2590                              .CreateStackSave("savedstack");
2591 
2592     // Insert a call to llvm.stackrestore before any return instructions in the
2593     // inlined function.
2594     for (ReturnInst *RI : Returns) {
2595       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2596       // call and a return.  The return will restore the stack pointer.
2597       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2598         continue;
2599       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2600         continue;
2601       IRBuilder<>(RI).CreateStackRestore(SavedPtr);
2602     }
2603   }
2604 
2605   // If we are inlining for an invoke instruction, we must make sure to rewrite
2606   // any call instructions into invoke instructions.  This is sensitive to which
2607   // funclet pads were top-level in the inlinee, so must be done before
2608   // rewriting the "parent pad" links.
2609   if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2610     BasicBlock *UnwindDest = II->getUnwindDest();
2611     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2612     if (isa<LandingPadInst>(FirstNonPHI)) {
2613       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2614     } else {
2615       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2616     }
2617   }
2618 
2619   // Update the lexical scopes of the new funclets and callsites.
2620   // Anything that had 'none' as its parent is now nested inside the callsite's
2621   // EHPad.
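  // For example (labels invented), a cloned funclet
  //   %cp = cleanuppad within none []
  // becomes
  //   %cp = cleanuppad within %callsite.pad []
  // so that it nests inside the EH pad the inlined call site itself lives in.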
2622   if (CallSiteEHPad) {
2623     for (Function::iterator BB = FirstNewBlock->getIterator(),
2624                             E = Caller->end();
2625          BB != E; ++BB) {
2626       // Add bundle operands to inlined call sites.
2627       PropagateOperandBundles(BB, CallSiteEHPad);
2628 
2629       // It is problematic if the inlinee has a cleanupret which unwinds to
2630       // caller and we inline it at a call site which doesn't itself unwind but
2631       // sits in an EH pad that unwinds locally.  Such an edge must be
2632       // dynamically unreachable, so we replace the cleanupret with unreachable.
2633       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2634         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2635           changeToUnreachable(CleanupRet);
2636 
2637       Instruction *I = BB->getFirstNonPHI();
2638       if (!I->isEHPad())
2639         continue;
2640 
2641       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2642         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2643           CatchSwitch->setParentPad(CallSiteEHPad);
2644       } else {
2645         auto *FPI = cast<FuncletPadInst>(I);
2646         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2647           FPI->setParentPad(CallSiteEHPad);
2648       }
2649     }
2650   }
2651 
2652   if (InlinedDeoptimizeCalls) {
2653     // We need to at least remove the deoptimizing returns from the Returns set,
2654     // so that the control flow from those returns does not get merged into the
2655     // caller (but terminates it instead).  If the caller's return type does not
2656     // match the callee's return type, we also need to change the return type of
2657     // the intrinsic.
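    // Illustration (types invented): a cloned block ending in
    //   %v = call i32 @llvm.experimental.deoptimize.i32(...) [ "deopt"(...) ]
    //   ret i32 %v
    // inside a caller that returns i64 is rewritten to call
    // @llvm.experimental.deoptimize.i64 and return its result instead.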
2658     if (Caller->getReturnType() == CB.getType()) {
2659       llvm::erase_if(Returns, [](ReturnInst *RI) {
2660         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2661       });
2662     } else {
2663       SmallVector<ReturnInst *, 8> NormalReturns;
2664       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2665           Caller->getParent(), Intrinsic::experimental_deoptimize,
2666           {Caller->getReturnType()});
2667 
2668       for (ReturnInst *RI : Returns) {
2669         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2670         if (!DeoptCall) {
2671           NormalReturns.push_back(RI);
2672           continue;
2673         }
2674 
2675         // The calling convention on the deoptimize call itself may be bogus,
2676         // since the code we're inlining may have undefined behavior (and may
2677         // never actually execute at runtime); but all
2678         // @llvm.experimental.deoptimize declarations have to have the same
2679         // calling convention in a well-formed module.
2680         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2681         NewDeoptIntrinsic->setCallingConv(CallingConv);
2682         auto *CurBB = RI->getParent();
2683         RI->eraseFromParent();
2684 
2685         SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2686 
2687         SmallVector<OperandBundleDef, 1> OpBundles;
2688         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2689         auto DeoptAttributes = DeoptCall->getAttributes();
2690         DeoptCall->eraseFromParent();
2691         assert(!OpBundles.empty() &&
2692                "Expected at least the deopt operand bundle");
2693 
2694         IRBuilder<> Builder(CurBB);
2695         CallInst *NewDeoptCall =
2696             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2697         NewDeoptCall->setCallingConv(CallingConv);
2698         NewDeoptCall->setAttributes(DeoptAttributes);
2699         if (NewDeoptCall->getType()->isVoidTy())
2700           Builder.CreateRetVoid();
2701         else
2702           Builder.CreateRet(NewDeoptCall);
2703         // Since the ret type is changed, remove the incompatible attributes.
2704         NewDeoptCall->removeRetAttrs(
2705             AttributeFuncs::typeIncompatible(NewDeoptCall->getType()));
2706       }
2707 
2708       // Leave behind the normal returns so we can merge control flow.
2709       std::swap(Returns, NormalReturns);
2710     }
2711   }
2712 
2713   // Handle any inlined musttail call sites.  In order for a new call site to be
2714   // musttail, the source of the clone and the inlined call site must have been
2715   // musttail.  Therefore it's safe to return without merging control into the
2716   // phi below.
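  // Illustration (names invented): a cloned block ending in
  //   %r = musttail call ptr @g(...)
  //   ret ptr %r
  // keeps its own ret instead of branching into the merge block; if the
  // caller's return type differs from the callee's, the ret (and any old
  // bitcast feeding it) is recreated as a ret of %r cast to the caller's type.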
2717   if (InlinedMustTailCalls) {
2718     // Check if we need to bitcast the result of any musttail calls.
2719     Type *NewRetTy = Caller->getReturnType();
2720     bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2721 
2722     // Handle the returns preceded by musttail calls separately.
2723     SmallVector<ReturnInst *, 8> NormalReturns;
2724     for (ReturnInst *RI : Returns) {
2725       CallInst *ReturnedMustTail =
2726           RI->getParent()->getTerminatingMustTailCall();
2727       if (!ReturnedMustTail) {
2728         NormalReturns.push_back(RI);
2729         continue;
2730       }
2731       if (!NeedBitCast)
2732         continue;
2733 
2734       // Delete the old return and any preceding bitcast.
2735       BasicBlock *CurBB = RI->getParent();
2736       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2737       RI->eraseFromParent();
2738       if (OldCast)
2739         OldCast->eraseFromParent();
2740 
2741       // Insert a new bitcast and return with the right type.
2742       IRBuilder<> Builder(CurBB);
2743       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2744     }
2745 
2746     // Leave behind the normal returns so we can merge control flow.
2747     std::swap(Returns, NormalReturns);
2748   }
2749 
2750   // Now that all of the transforms on the inlined code have taken place but
2751   // before we splice the inlined code into the CFG and lose track of which
2752   // blocks were actually inlined, collect the call sites that were cloned
2753   // into the caller, so that the inliner can revisit these new call sites
2754   // later through IFI.InlinedCallSites. Calls to intrinsics are not
2755   // collected because they are not inlineable.
2756   if (InlinedFunctionInfo.ContainsCalls) {
2757     // Collect the raw call sites that were inlined.
2758     for (BasicBlock &NewBB :
2759          make_range(FirstNewBlock->getIterator(), Caller->end()))
2760       for (Instruction &I : NewBB)
2761         if (auto *CB = dyn_cast<CallBase>(&I))
2762           if (!(CB->getCalledFunction() &&
2763                 CB->getCalledFunction()->isIntrinsic()))
2764             IFI.InlinedCallSites.push_back(CB);
2765   }
2766 
2767   // If we cloned in _exactly one_ basic block, and if that block ends in a
2768   // return instruction, we splice the body of the inlined callee directly into
2769   // the calling basic block.
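  // Sketch (names invented): for `%r = call i32 @callee(i32 %a)` whose cloned
  // body is the single block `%t = add i32 %a, 1; ret i32 %t`, the add is moved
  // in front of the call, uses of %r are rewritten to %t, and both the call and
  // the ret are deleted.  A plain call needs no new blocks or branches; an
  // invoke additionally gets a branch to its normal destination.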
2770   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2771     // Move all of the instructions right before the call.
2772     OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
2773                    FirstNewBlock->end());
2774     // Remove the cloned basic block.
2775     Caller->back().eraseFromParent();
2776 
2777     // If the call site was an invoke instruction, add a branch to the normal
2778     // destination.
2779     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2780       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2781       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2782     }
2783 
2784     // If the return instruction returned a value, replace uses of the call with
2785     // uses of the returned value.
2786     if (!CB.use_empty()) {
2787       ReturnInst *R = Returns[0];
2788       if (&CB == R->getReturnValue())
2789         CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2790       else
2791         CB.replaceAllUsesWith(R->getReturnValue());
2792     }
2793     // Since we are now done with the Call/Invoke, we can delete it.
2794     CB.eraseFromParent();
2795 
2796     // Since we are now done with the return instruction, delete it also.
2797     Returns[0]->eraseFromParent();
2798 
2799     if (MergeAttributes)
2800       AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
2801 
2802     // We are now done with the inlining.
2803     return InlineResult::success();
2804   }
2805 
2806   // Otherwise, we have the normal case of more than one block to inline or
2807   // multiple return sites.
2808 
2809   // We want to clone the entire callee function into the hole between the
2810   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2811   // this is an invoke instruction or a call instruction.
2812   BasicBlock *AfterCallBB;
2813   BranchInst *CreatedBranchToNormalDest = nullptr;
2814   if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2815 
2816     // Add an unconditional branch to make this look like the CallInst case...
2817     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2818 
2819     // Split the basic block.  This guarantees that no PHI nodes will have to be
2820     // updated due to new incoming edges, and make the invoke case more
2821     // updated due to new incoming edges, and makes the invoke case more
2822     AfterCallBB =
2823         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2824                                 CalledFunc->getName() + ".exit");
2825 
2826   } else { // It's a call
2827     // If this is a call instruction, we need to split the basic block that
2828     // the call lives in.
2829     //
2830     AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2831                                           CalledFunc->getName() + ".exit");
2832   }
2833 
2834   if (IFI.CallerBFI) {
2835     // Copy original BB's block frequency to AfterCallBB
2836     IFI.CallerBFI->setBlockFreq(AfterCallBB,
2837                                 IFI.CallerBFI->getBlockFreq(OrigBB));
2838   }
2839 
2840   // Change the branch that used to go to AfterCallBB to branch to the first
2841   // basic block of the inlined function.
2842   //
2843   Instruction *Br = OrigBB->getTerminator();
2844   assert(Br && Br->getOpcode() == Instruction::Br &&
2845          "splitBasicBlock broken!");
2846   Br->setOperand(0, &*FirstNewBlock);
2847 
2848   // Now that the function is correct, make it a little bit nicer.  In
2849   // particular, move the basic blocks inserted from the end of the function
2850   // into the space made by splitting the source basic block.
2851   Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
2852                  Caller->end());
2853 
2854   // Handle all of the return instructions that we just cloned in, and eliminate
2855   // any users of the original call/invoke instruction.
2856   Type *RTy = CalledFunc->getReturnType();
2857 
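  // Sketch for the multi-return case (names invented): cloned blocks ending in
  // `ret i32 %a` and `ret i32 %b` are rewritten to branch to the '.exit' block,
  // which gains `%phi = phi i32 [ %a, ... ], [ %b, ... ]` at its front; users of
  // the original call/invoke are redirected to that phi.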
2858   PHINode *PHI = nullptr;
2859   if (Returns.size() > 1) {
2860     // The PHI node should go at the front of the new basic block to merge all
2861     // possible incoming values.
2862     if (!CB.use_empty()) {
2863       PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
2864       PHI->insertBefore(AfterCallBB->begin());
2865       // Anything that used the result of the function call should now use the
2866       // PHI node as their operand.
2867       CB.replaceAllUsesWith(PHI);
2868     }
2869 
2870     // Loop over all of the return instructions adding entries to the PHI node
2871     // as appropriate.
2872     if (PHI) {
2873       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2874         ReturnInst *RI = Returns[i];
2875         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2876                "Ret value not consistent in function!");
2877         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2878       }
2879     }
2880 
2881     // Add a branch to the merge points and remove return instructions.
2882     DebugLoc Loc;
2883     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2884       ReturnInst *RI = Returns[i];
2885       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2886       Loc = RI->getDebugLoc();
2887       BI->setDebugLoc(Loc);
2888       RI->eraseFromParent();
2889     }
2890     // We need to set the debug location to *somewhere* inside the
2891     // inlined function. The line number may be nonsensical, but the
2892     // instruction will at least be associated with the right
2893     // function.
2894     if (CreatedBranchToNormalDest)
2895       CreatedBranchToNormalDest->setDebugLoc(Loc);
2896   } else if (!Returns.empty()) {
2897     // Otherwise, if there is exactly one return value, just replace anything
2898     // using the return value of the call with the computed value.
2899     if (!CB.use_empty()) {
2900       if (&CB == Returns[0]->getReturnValue())
2901         CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2902       else
2903         CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2904     }
2905 
2906     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2907     BasicBlock *ReturnBB = Returns[0]->getParent();
2908     ReturnBB->replaceAllUsesWith(AfterCallBB);
2909 
2910     // Splice the code from the return block into the block that it will return
2911     // to, which contains the code that was after the call.
2912     AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
2913 
2914     if (CreatedBranchToNormalDest)
2915       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2916 
2917     // Delete the return instruction and the now-empty ReturnBB.
2918     Returns[0]->eraseFromParent();
2919     ReturnBB->eraseFromParent();
2920   } else if (!CB.use_empty()) {
2921     // No returns, but something is using the return value of the call.  Just
2922     // nuke the result.
2923     CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2924   }
2925 
2926   // Since we are now done with the Call/Invoke, we can delete it.
2927   CB.eraseFromParent();
2928 
2929   // If we inlined any musttail calls and the original return is now
2930   // unreachable, delete it.  It can only contain a bitcast and ret.
2931   if (InlinedMustTailCalls && pred_empty(AfterCallBB))
2932     AfterCallBB->eraseFromParent();
2933 
2934   // We should always be able to fold the entry block of the function into the
2935   // single predecessor of the block...
2936   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2937   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2938 
2939   // Splice the code entry block into calling block, right before the
2940   // unconditional branch.
2941   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2942   OrigBB->splice(Br->getIterator(), CalleeEntry);
2943 
2944   // Remove the unconditional branch.
2945   Br->eraseFromParent();
2946 
2947   // Now we can remove the CalleeEntry block, which is now empty.
2948   CalleeEntry->eraseFromParent();
2949 
2950   // If we inserted a phi node, check to see if it has a single value (e.g. all
2951   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2952   // block other optimizations.
2953   if (PHI) {
2954     AssumptionCache *AC =
2955         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2956     auto &DL = Caller->getParent()->getDataLayout();
2957     if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2958       PHI->replaceAllUsesWith(V);
2959       PHI->eraseFromParent();
2960     }
2961   }
2962 
2963   if (MergeAttributes)
2964     AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
2965 
2966   return InlineResult::success();
2967 }
2968