xref: /freebsd-src/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements interprocedural passes which walk the
11 /// call-graph deducing and/or propagating function attributes.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/FunctionAttrs.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/BasicAliasAnalysis.h"
27 #include "llvm/Analysis/CFG.h"
28 #include "llvm/Analysis/CGSCCPassManager.h"
29 #include "llvm/Analysis/CallGraph.h"
30 #include "llvm/Analysis/CallGraphSCCPass.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/LazyCallGraph.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemoryLocation.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/InstIterator.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/PassManager.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/Use.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Transforms/IPO.h"
62 #include "llvm/Transforms/Utils/Local.h"
63 #include <cassert>
64 #include <iterator>
65 #include <map>
66 #include <vector>
67 
using namespace llvm;

// Debug type used by LLVM_DEBUG / -debug-only filtering for this pass.
#define DEBUG_TYPE "function-attrs"

// Statistics reported under -stats: each counter records how many times this
// pass inferred the corresponding attribute.
STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumWriteOnly, "Number of functions marked writeonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
STATISTIC(NumNoFree, "Number of functions marked as nofree");
STATISTIC(NumWillReturn, "Number of functions marked as willreturn");
STATISTIC(NumNoSync, "Number of functions marked as nosync");

// Counters for attributes propagated during the ThinLTO thin-link phase
// (see thinLTOPropagateFunctionAttrs below).
STATISTIC(NumThinLinkNoRecurse,
          "Number of functions marked as norecurse during thinlink");
STATISTIC(NumThinLinkNoUnwind,
          "Number of functions marked as nounwind during thinlink");

// Command-line knobs controlling individual inferences; all are hidden
// developer options.
static cl::opt<bool> EnableNonnullArgPropagation(
    "enable-nonnull-arg-prop", cl::init(true), cl::Hidden,
    cl::desc("Try to propagate nonnull argument attributes from callsites to "
             "caller functions."));

static cl::opt<bool> DisableNoUnwindInference(
    "disable-nounwind-inference", cl::Hidden,
    cl::desc("Stop inferring nounwind attribute during function-attrs pass"));

static cl::opt<bool> DisableNoFreeInference(
    "disable-nofree-inference", cl::Hidden,
    cl::desc("Stop inferring nofree attribute during function-attrs pass"));

// Note: init(true) means ThinLTO attribute propagation is OFF by default.
static cl::opt<bool> DisableThinLTOPropagation(
    "disable-thinlto-funcattrs", cl::init(true), cl::Hidden,
    cl::desc("Don't propagate function-attrs in thinLTO"));

namespace {

/// The set of functions in the call-graph SCC currently being analyzed.
using SCCNodeSet = SmallSetVector<Function *, 8>;

} // end anonymous namespace
114 
/// Returns the memory access attribute for function F using AAR for AA results,
/// where SCCNodes is the current SCC.
///
/// If ThisBody is true, this function may examine the function body and will
/// return a result pertaining to this copy of the function. If it is false, the
/// result will be based only on AA results for the function declaration; it
/// will be assumed that some other (perhaps less optimized) version of the
/// function may be selected at link time.
///
/// Result meaning: MAK_ReadNone / MAK_ReadOnly / MAK_WriteOnly indicate the
/// corresponding attribute would be valid for F; MAK_MayWrite means nothing
/// can be inferred.
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
                                                  AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  if (!ThisBody) {
    // Declaration-only view: trust what AA derives from existing attributes,
    // and otherwise assume the worst, since the linked-in body may differ.
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    if (AliasAnalysis::doesNotReadMemory(MRB))
      return MAK_WriteOnly;

    // Conservatively assume it reads and writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (Instruction &I : instructions(F)) {
    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    if (auto *Call = dyn_cast<CallBase>(&I)) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles.  Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
      if (isNoModRef(MRI))
        continue;

      // A pseudo probe call shouldn't change any function attribute since it
      // doesn't translate to a real instruction. It comes with a memory access
      // tag to prevent itself being removed by optimizations and not block
      // other instructions being optimized.
      if (isa<PseudoProbeInst>(I))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, note it.
        if (isModSet(MRI))
          WritesMemory = true;
        // If it reads, note it.
        if (isRefSet(MRI))
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (const Use &U : Call->args()) {
        const Value *Arg = U;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        // Unknown offset/size: be conservative about what the callee may
        // touch through this pointer.
        MemoryLocation Loc =
            MemoryLocation::getBeforeOrAfter(Arg, I.getAAMetadata());

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (isModSet(MRI))
          // Writes non-local memory.
          WritesMemory = true;
        if (isRefSet(MRI))
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(&I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously!  Check if they
    // read or write memory.
    //
    // Writes memory, remember that.
    WritesMemory |= I.mayWriteToMemory();

    // If this instruction may read memory, remember that.
    ReadsMemory |= I.mayReadFromMemory();
  }

  // Collapse the two flags into the strongest valid MemoryAccessKind.
  if (WritesMemory) {
    if (!ReadsMemory)
      return MAK_WriteOnly;
    else
      return MAK_MayWrite;
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}
243 
244 MemoryAccessKind llvm::computeFunctionBodyMemoryAccess(Function &F,
245                                                        AAResults &AAR) {
246   return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
247 }
248 
249 /// Deduce readonly/readnone attributes for the SCC.
250 template <typename AARGetterT>
251 static void addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
252                          SmallSet<Function *, 8> &Changed) {
253   // Check if any of the functions in the SCC read or write memory.  If they
254   // write memory then they can't be marked readnone or readonly.
255   bool ReadsMemory = false;
256   bool WritesMemory = false;
257   for (Function *F : SCCNodes) {
258     // Call the callable parameter to look up AA results for this function.
259     AAResults &AAR = AARGetter(*F);
260 
261     // Non-exact function definitions may not be selected at link time, and an
262     // alternative version that writes to memory may be selected.  See the
263     // comment on GlobalValue::isDefinitionExact for more details.
264     switch (checkFunctionMemoryAccess(*F, F->hasExactDefinition(),
265                                       AAR, SCCNodes)) {
266     case MAK_MayWrite:
267       return;
268     case MAK_ReadOnly:
269       ReadsMemory = true;
270       break;
271     case MAK_WriteOnly:
272       WritesMemory = true;
273       break;
274     case MAK_ReadNone:
275       // Nothing to do!
276       break;
277     }
278   }
279 
280   // If the SCC contains both functions that read and functions that write, then
281   // we cannot add readonly attributes.
282   if (ReadsMemory && WritesMemory)
283     return;
284 
285   // Success!  Functions in this SCC do not access memory, or only read memory.
286   // Give them the appropriate attribute.
287 
288   for (Function *F : SCCNodes) {
289     if (F->doesNotAccessMemory())
290       // Already perfect!
291       continue;
292 
293     if (F->onlyReadsMemory() && ReadsMemory)
294       // No change.
295       continue;
296 
297     if (F->doesNotReadMemory() && WritesMemory)
298       continue;
299 
300     Changed.insert(F);
301 
302     // Clear out any existing attributes.
303     AttrBuilder AttrsToRemove;
304     AttrsToRemove.addAttribute(Attribute::ReadOnly);
305     AttrsToRemove.addAttribute(Attribute::ReadNone);
306     AttrsToRemove.addAttribute(Attribute::WriteOnly);
307 
308     if (!WritesMemory && !ReadsMemory) {
309       // Clear out any "access range attributes" if readnone was deduced.
310       AttrsToRemove.addAttribute(Attribute::ArgMemOnly);
311       AttrsToRemove.addAttribute(Attribute::InaccessibleMemOnly);
312       AttrsToRemove.addAttribute(Attribute::InaccessibleMemOrArgMemOnly);
313     }
314     F->removeFnAttrs(AttrsToRemove);
315 
316     // Add in the new attribute.
317     if (WritesMemory && !ReadsMemory)
318       F->addFnAttr(Attribute::WriteOnly);
319     else
320       F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);
321 
322     if (WritesMemory && !ReadsMemory)
323       ++NumWriteOnly;
324     else if (ReadsMemory)
325       ++NumReadOnly;
326     else
327       ++NumReadNone;
328   }
329 }
330 
// Compute definitive function attributes for a function taking into account
// prevailing definitions and linkage types. Returns null when we must be
// conservative; results (including null) are memoized in
// CachedPrevailingSummary.
static FunctionSummary *calculatePrevailingSummary(
    ValueInfo VI,
    DenseMap<ValueInfo, FunctionSummary *> &CachedPrevailingSummary,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {

  if (CachedPrevailingSummary.count(VI))
    return CachedPrevailingSummary[VI];

  /// At this point, prevailing symbols have been resolved. The following leads
  /// to returning a conservative result:
  /// - Multiple instances with local linkage. Normally local linkage would be
  ///   unique per module
  ///   as the GUID includes the module path. We could have a guid alias if
  ///   there wasn't any distinguishing path when each file was compiled, but
  ///   that should be rare so we'll punt on those.

  /// These next 2 cases should not happen and will assert:
  /// - Multiple instances with external linkage. This should be caught in
  ///   symbol resolution
  /// - Non-existent FunctionSummary for Aliasee. This presents a hole in our
  ///   knowledge meaning we have to go conservative.

  /// Otherwise, we calculate attributes for a function as:
  ///   1. If we have a local linkage, take its attributes. If there's somehow
  ///      multiple, bail and go conservative.
  ///   2. If we have an external/WeakODR/LinkOnceODR linkage check that it is
  ///      prevailing, take its attributes.
  ///   3. If we have a Weak/LinkOnce linkage the copies can have semantic
  ///      differences. However, if the prevailing copy is known it will be used
  ///      so take its attributes. If the prevailing copy is in a native file
  ///      all IR copies will be dead and propagation will go conservative.
  ///   4. AvailableExternally summaries without a prevailing copy are known to
  ///      occur in a couple of circumstances:
  ///      a. An internal function gets imported due to its caller getting
  ///         imported, it becomes AvailableExternally but no prevailing
  ///         definition exists. Because it has to get imported along with its
  ///         caller the attributes will be captured by propagating on its
  ///         caller.
  ///      b. C++11 [temp.explicit]p10 can generate AvailableExternally
  ///         definitions of explicitly instanced template declarations
  ///         for inlining which are ultimately dropped from the TU. Since this
  ///         is localized to the TU the attributes will have already made it to
  ///         the callers.
  ///      These are edge cases and already captured by their callers so we
  ///      ignore these for now. If they become relevant to optimize in the
  ///      future this can be revisited.
  ///   5. Otherwise, go conservative.

  // Seed the cache with the conservative answer so any result computed below
  // (or a bail-out) is what subsequent queries observe.
  CachedPrevailingSummary[VI] = nullptr;
  FunctionSummary *Local = nullptr;
  FunctionSummary *Prevailing = nullptr;

  for (const auto &GVS : VI.getSummaryList()) {
    // Dead summaries cannot contribute attributes.
    if (!GVS->isLive())
      continue;

    FunctionSummary *FS = dyn_cast<FunctionSummary>(GVS->getBaseObject());
    // Virtual and Unknown (e.g. indirect) calls require going conservative
    if (!FS || FS->fflags().HasUnknownCall)
      return nullptr;

    const auto &Linkage = GVS->linkage();
    if (GlobalValue::isLocalLinkage(Linkage)) {
      if (Local) {
        LLVM_DEBUG(
            dbgs()
            << "ThinLTO FunctionAttrs: Multiple Local Linkage, bailing on "
               "function "
            << VI.name() << " from " << FS->modulePath() << ". Previous module "
            << Local->modulePath() << "\n");
        return nullptr;
      }
      Local = FS;
    } else if (GlobalValue::isExternalLinkage(Linkage)) {
      // External linkage implies a unique, prevailing definition.
      assert(IsPrevailing(VI.getGUID(), GVS.get()));
      Prevailing = FS;
      break;
    } else if (GlobalValue::isWeakODRLinkage(Linkage) ||
               GlobalValue::isLinkOnceODRLinkage(Linkage) ||
               GlobalValue::isWeakAnyLinkage(Linkage) ||
               GlobalValue::isLinkOnceAnyLinkage(Linkage)) {
      // Only the prevailing copy's attributes are trustworthy here.
      if (IsPrevailing(VI.getGUID(), GVS.get())) {
        Prevailing = FS;
        break;
      }
    } else if (GlobalValue::isAvailableExternallyLinkage(Linkage)) {
      // TODO: Handle these cases if they become meaningful
      continue;
    }
  }

  // Local and Prevailing are mutually exclusive by construction: finding a
  // prevailing copy breaks the loop before a local one could also be set.
  if (Local) {
    assert(!Prevailing);
    CachedPrevailingSummary[VI] = Local;
  } else if (Prevailing) {
    assert(!Local);
    CachedPrevailingSummary[VI] = Prevailing;
  }

  return CachedPrevailingSummary[VI];
}
435 
/// Propagate norecurse and nounwind across the summary index's call-graph
/// SCCs during the ThinLTO thin link. Returns true if any summary flag
/// changed. Gated off by default via -disable-thinlto-funcattrs.
bool llvm::thinLTOPropagateFunctionAttrs(
    ModuleSummaryIndex &Index,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        IsPrevailing) {
  // TODO: implement addNoAliasAttrs once
  // there's more information about the return type in the summary
  if (DisableThinLTOPropagation)
    return false;

  DenseMap<ValueInfo, FunctionSummary *> CachedPrevailingSummary;
  bool Changed = false;

  auto PropagateAttributes = [&](std::vector<ValueInfo> &SCCNodes) {
    // Assume we can propagate unless we discover otherwise
    FunctionSummary::FFlags InferredFlags;
    // A multi-node SCC is mutually recursive by definition, so norecurse can
    // only hold for singleton SCCs.
    InferredFlags.NoRecurse = (SCCNodes.size() == 1);
    InferredFlags.NoUnwind = true;

    for (auto &V : SCCNodes) {
      FunctionSummary *CallerSummary =
          calculatePrevailingSummary(V, CachedPrevailingSummary, IsPrevailing);

      // Function summaries can fail to contain information such as declarations
      if (!CallerSummary)
        return;

      if (CallerSummary->fflags().MayThrow)
        InferredFlags.NoUnwind = false;

      // Every callee must also satisfy a flag for it to hold for this SCC.
      for (const auto &Callee : CallerSummary->calls()) {
        FunctionSummary *CalleeSummary = calculatePrevailingSummary(
            Callee.first, CachedPrevailingSummary, IsPrevailing);

        if (!CalleeSummary)
          return;

        if (!CalleeSummary->fflags().NoRecurse)
          InferredFlags.NoRecurse = false;

        if (!CalleeSummary->fflags().NoUnwind)
          InferredFlags.NoUnwind = false;

        // Both flags refuted: no point scanning the remaining callees.
        if (!InferredFlags.NoUnwind && !InferredFlags.NoRecurse)
          break;
      }
    }

    if (InferredFlags.NoUnwind || InferredFlags.NoRecurse) {
      Changed = true;
      for (auto &V : SCCNodes) {
        if (InferredFlags.NoRecurse) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoRecurse to "
                            << V.name() << "\n");
          ++NumThinLinkNoRecurse;
        }

        if (InferredFlags.NoUnwind) {
          LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoUnwind to "
                            << V.name() << "\n");
          ++NumThinLinkNoUnwind;
        }

        // Record the deduced flags on every live summary of the node.
        for (auto &S : V.getSummaryList()) {
          if (auto *FS = dyn_cast<FunctionSummary>(S.get())) {
            if (InferredFlags.NoRecurse)
              FS->setNoRecurse();

            if (InferredFlags.NoUnwind)
              FS->setNoUnwind();
          }
        }
      }
    }
  };

  // Call propagation functions on each SCC in the Index
  for (scc_iterator<ModuleSummaryIndex *> I = scc_begin(&Index); !I.isAtEnd();
       ++I) {
    std::vector<ValueInfo> Nodes(*I);
    PropagateAttributes(Nodes);
  }
  return Changed;
}
519 
520 namespace {
521 
522 /// For a given pointer Argument, this retains a list of Arguments of functions
523 /// in the same SCC that the pointer data flows into. We use this to build an
524 /// SCC of the arguments.
525 struct ArgumentGraphNode {
526   Argument *Definition;
527   SmallVector<ArgumentGraphNode *, 4> Uses;
528 };
529 
530 class ArgumentGraph {
531   // We store pointers to ArgumentGraphNode objects, so it's important that
532   // that they not move around upon insert.
533   using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;
534 
535   ArgumentMapTy ArgumentMap;
536 
537   // There is no root node for the argument graph, in fact:
538   //   void f(int *x, int *y) { if (...) f(x, y); }
539   // is an example where the graph is disconnected. The SCCIterator requires a
540   // single entry point, so we maintain a fake ("synthetic") root node that
541   // uses every node. Because the graph is directed and nothing points into
542   // the root, it will not participate in any SCCs (except for its own).
543   ArgumentGraphNode SyntheticRoot;
544 
545 public:
546   ArgumentGraph() { SyntheticRoot.Definition = nullptr; }
547 
548   using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;
549 
550   iterator begin() { return SyntheticRoot.Uses.begin(); }
551   iterator end() { return SyntheticRoot.Uses.end(); }
552   ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }
553 
554   ArgumentGraphNode *operator[](Argument *A) {
555     ArgumentGraphNode &Node = ArgumentMap[A];
556     Node.Definition = A;
557     SyntheticRoot.Uses.push_back(&Node);
558     return &Node;
559   }
560 };
561 
562 /// This tracker checks whether callees are in the SCC, and if so it does not
563 /// consider that a capture, instead adding it to the "Uses" list and
564 /// continuing with the analysis.
565 struct ArgumentUsesTracker : public CaptureTracker {
566   ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}
567 
568   void tooManyUses() override { Captured = true; }
569 
570   bool captured(const Use *U) override {
571     CallBase *CB = dyn_cast<CallBase>(U->getUser());
572     if (!CB) {
573       Captured = true;
574       return true;
575     }
576 
577     Function *F = CB->getCalledFunction();
578     if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
579       Captured = true;
580       return true;
581     }
582 
583     // Note: the callee and the two successor blocks *follow* the argument
584     // operands.  This means there is no need to adjust UseIndex to account for
585     // these.
586 
587     unsigned UseIndex =
588         std::distance(const_cast<const Use *>(CB->arg_begin()), U);
589 
590     assert(UseIndex < CB->data_operands_size() &&
591            "Indirect function calls should have been filtered above!");
592 
593     if (UseIndex >= CB->arg_size()) {
594       // Data operand, but not a argument operand -- must be a bundle operand
595       assert(CB->hasOperandBundles() && "Must be!");
596 
597       // CaptureTracking told us that we're being captured by an operand bundle
598       // use.  In this case it does not matter if the callee is within our SCC
599       // or not -- we've been captured in some unknown way, and we have to be
600       // conservative.
601       Captured = true;
602       return true;
603     }
604 
605     if (UseIndex >= F->arg_size()) {
606       assert(F->isVarArg() && "More params than args in non-varargs call");
607       Captured = true;
608       return true;
609     }
610 
611     Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
612     return false;
613   }
614 
615   // True only if certainly captured (used outside our SCC).
616   bool Captured = false;
617 
618   // Uses within our SCC.
619   SmallVector<Argument *, 4> Uses;
620 
621   const SCCNodeSet &SCCNodes;
622 };
623 
624 } // end anonymous namespace
625 
626 namespace llvm {
627 
/// GraphTraits adapter so generic graph algorithms (e.g. scc_iterator) can
/// walk ArgumentGraphNodes; a node's children are the argument nodes its
/// value flows into (the Uses list).
template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};
636 
/// GraphTraits for a whole ArgumentGraph: entry is the synthetic root, and
/// node iteration delegates to the graph's begin()/end() (the root's edges).
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};
647 
648 } // end namespace llvm
649 
/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
///
/// Walks the transitive uses of pointer argument \p A with a worklist,
/// deciding whether the pointee is read or written through it. \p SCCNodes is
/// the set of arguments (across the SCC) participating in the optimistic
/// analysis: flowing into one of them is not, by itself, a read or write.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr() || A->hasPreallocatedAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  // Seed the worklist with the argument's direct uses.
  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      // Assume the pointer may escape into the call's result unless shown
      // otherwise below.
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      // If the pointer may be captured by the result, the result's uses must
      // also be explored.
      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallBase &CB = cast<CallBase>(*I);
      if (CB.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CB.getCalledFunction();
      if (!F) {
        // Indirect call: we only know what the call-site attributes say.
        if (CB.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands.  This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CB.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the call site is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CB.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CB.arg_size();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        // Passed through the variadic tail: no formal parameter to reason
        // about, so give up.
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CB.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to a operand bundle use, these cannot participate in the optimistic SCC
      // analysis.  Instead, we model the operand bundle uses as arguments in
      // call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on call site here do the right thing for calls and
        // invokes with operand bundles.

        if (!CB.onlyReadsMemory() && !CB.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CB.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      // Comparing or returning the pointer neither reads nor writes the
      // pointee.
      break;

    default:
      // Any other user (store, atomic op, ...) may write through the pointer.
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
777 
778 /// Deduce returned attributes for the SCC.
779 static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
780                                      SmallSet<Function *, 8> &Changed) {
781   // Check each function in turn, determining if an argument is always returned.
782   for (Function *F : SCCNodes) {
783     // We can infer and propagate function attributes only when we know that the
784     // definition we'll get at link time is *exactly* the definition we see now.
785     // For more details, see GlobalValue::mayBeDerefined.
786     if (!F->hasExactDefinition())
787       continue;
788 
789     if (F->getReturnType()->isVoidTy())
790       continue;
791 
792     // There is nothing to do if an argument is already marked as 'returned'.
793     if (llvm::any_of(F->args(),
794                      [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
795       continue;
796 
797     auto FindRetArg = [&]() -> Value * {
798       Value *RetArg = nullptr;
799       for (BasicBlock &BB : *F)
800         if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
801           // Note that stripPointerCasts should look through functions with
802           // returned arguments.
803           Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
804           if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
805             return nullptr;
806 
807           if (!RetArg)
808             RetArg = RetVal;
809           else if (RetArg != RetVal)
810             return nullptr;
811         }
812 
813       return RetArg;
814     };
815 
816     if (Value *RetArg = FindRetArg()) {
817       auto *A = cast<Argument>(RetArg);
818       A->addAttr(Attribute::Returned);
819       ++NumReturned;
820       Changed.insert(F);
821     }
822   }
823 }
824 
825 /// If a callsite has arguments that are also arguments to the parent function,
826 /// try to propagate attributes from the callsite's arguments to the parent's
827 /// arguments. This may be important because inlining can cause information loss
828 /// when attribute knowledge disappears with the inlined call.
829 static bool addArgumentAttrsFromCallsites(Function &F) {
830   if (!EnableNonnullArgPropagation)
831     return false;
832 
833   bool Changed = false;
834 
835   // For an argument attribute to transfer from a callsite to the parent, the
836   // call must be guaranteed to execute every time the parent is called.
837   // Conservatively, just check for calls in the entry block that are guaranteed
838   // to execute.
839   // TODO: This could be enhanced by testing if the callsite post-dominates the
840   // entry block or by doing simple forward walks or backward walks to the
841   // callsite.
842   BasicBlock &Entry = F.getEntryBlock();
843   for (Instruction &I : Entry) {
844     if (auto *CB = dyn_cast<CallBase>(&I)) {
845       if (auto *CalledFunc = CB->getCalledFunction()) {
846         for (auto &CSArg : CalledFunc->args()) {
847           if (!CSArg.hasNonNullAttr(/* AllowUndefOrPoison */ false))
848             continue;
849 
850           // If the non-null callsite argument operand is an argument to 'F'
851           // (the caller) and the call is guaranteed to execute, then the value
852           // must be non-null throughout 'F'.
853           auto *FArg = dyn_cast<Argument>(CB->getArgOperand(CSArg.getArgNo()));
854           if (FArg && !FArg->hasNonNullAttr()) {
855             FArg->addAttr(Attribute::NonNull);
856             Changed = true;
857           }
858         }
859       }
860     }
861     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
862       break;
863   }
864 
865   return Changed;
866 }
867 
868 static bool addReadAttr(Argument *A, Attribute::AttrKind R) {
869   assert((R == Attribute::ReadOnly || R == Attribute::ReadNone)
870          && "Must be a Read attribute.");
871   assert(A && "Argument must not be null.");
872 
873   // If the argument already has the attribute, nothing needs to be done.
874   if (A->hasAttribute(R))
875       return false;
876 
877   // Otherwise, remove potentially conflicting attribute, add the new one,
878   // and update statistics.
879   A->removeAttr(Attribute::WriteOnly);
880   A->removeAttr(Attribute::ReadOnly);
881   A->removeAttr(Attribute::ReadNone);
882   A->addAttr(R);
883   R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
884   return true;
885 }
886 
/// Deduce nocapture attributes for the SCC.
///
/// Works in two phases: first each argument is classified locally (trivially
/// not captured, definitely captured, or dependent on other functions in the
/// SCC); then the cross-function dependencies are resolved by running an SCC
/// decomposition over the collected argument graph. Arguments proven
/// uncaptured are also considered for readonly/readnone.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
                             SmallSet<Function *, 8> &Changed) {
  // Graph of argument-to-argument flows for arguments whose capture status
  // depends on arguments of other functions in this SCC.
  ArgumentGraph AG;

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (addArgumentAttrsFromCallsites(*F))
      Changed.insert(F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(Attribute::NoCapture);
          ++NumNoCapture;
          Changed.insert(F);
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      // Only pointer arguments can be captured.
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(Attribute::NoCapture);
            ++NumNoCapture;
            Changed.insert(F);
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in the
        // SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None)
          if (addReadAttr(A, R))
            Changed.insert(F);
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made.  If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // A singleton SCC whose only use is itself is a direct self-recursive
      // flow and cannot escape, e.g. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(Attribute::NoCapture);
        ++NumNoCapture;
        Changed.insert(A->getParent());
      }
      continue;
    }

    // If any node in this SCC is a partial node (empty Uses) whose final
    // verdict was "captured", the entire SCC is captured.
    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC.  Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    // An edge leaving the SCC must target an argument already proven
    // nocapture; otherwise the value may escape through it.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    // Nothing in the SCC escapes: every argument is nocapture.
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(Attribute::NoCapture);
      ++NumNoCapture;
      Changed.insert(A->getParent());
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    // Meet over the SCC: start at ReadNone, weaken to ReadOnly, and give up
    // entirely (None) as soon as any member demands it.
    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      // K is Attribute::None: one member blocks the whole SCC.
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        if (addReadAttr(A, ReadAttr))
          Changed.insert(A->getParent());
      }
    }
  }
}
1060 
/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
///
/// Implemented as a worklist walk backwards from every return value; the
/// walk succeeds only if all sources are fresh allocations (alloca, noalias
/// calls, or calls back into this SCC) or null/undef constants, and the
/// returned pointer is not otherwise captured.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  // Index-based loop: FlowsToReturn grows while we iterate, so size() is
  // deliberately re-read each pass.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      // Only null and undef constants are compatible with malloc-like
      // behavior; any other constant aliases globally visible memory.
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    // Returning an argument returns memory the caller already sees.
    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallBase &CB = cast<CallBase>(*RVI);
        if (CB.hasRetAttr(Attribute::NoAlias))
          break;
        // A call into this SCC is speculatively treated as noalias; the
        // callee is verified by its own isFunctionMallocLike scan.
        if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
          break;
        // Neither noalias nor in-SCC: treat as an unknown source below.
        LLVM_FALLTHROUGH;
      }
      default:
        return false; // Did not come from an allocation.
      }

    // Allocation found, but if the pointer also escapes elsewhere it may
    // alias; note ReturnCaptures is false since returning is the point here.
    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}
1127 
1128 /// Deduce noalias attributes for the SCC.
1129 static void addNoAliasAttrs(const SCCNodeSet &SCCNodes,
1130                             SmallSet<Function *, 8> &Changed) {
1131   // Check each function in turn, determining which functions return noalias
1132   // pointers.
1133   for (Function *F : SCCNodes) {
1134     // Already noalias.
1135     if (F->returnDoesNotAlias())
1136       continue;
1137 
1138     // We can infer and propagate function attributes only when we know that the
1139     // definition we'll get at link time is *exactly* the definition we see now.
1140     // For more details, see GlobalValue::mayBeDerefined.
1141     if (!F->hasExactDefinition())
1142       return;
1143 
1144     // We annotate noalias return values, which are only applicable to
1145     // pointer types.
1146     if (!F->getReturnType()->isPointerTy())
1147       continue;
1148 
1149     if (!isFunctionMallocLike(F, SCCNodes))
1150       return;
1151   }
1152 
1153   for (Function *F : SCCNodes) {
1154     if (F->returnDoesNotAlias() ||
1155         !F->getReturnType()->isPointerTy())
1156       continue;
1157 
1158     F->setReturnDoesNotAlias();
1159     ++NumNoAlias;
1160     Changed.insert(F);
1161   }
1162 }
1163 
1164 /// Tests whether this function is known to not return null.
1165 ///
1166 /// Requires that the function returns a pointer.
1167 ///
1168 /// Returns true if it believes the function will not return a null, and sets
1169 /// \p Speculative based on whether the returned conclusion is a speculative
1170 /// conclusion due to SCC calls.
1171 static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
1172                             bool &Speculative) {
1173   assert(F->getReturnType()->isPointerTy() &&
1174          "nonnull only meaningful on pointer types");
1175   Speculative = false;
1176 
1177   SmallSetVector<Value *, 8> FlowsToReturn;
1178   for (BasicBlock &BB : *F)
1179     if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
1180       FlowsToReturn.insert(Ret->getReturnValue());
1181 
1182   auto &DL = F->getParent()->getDataLayout();
1183 
1184   for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
1185     Value *RetVal = FlowsToReturn[i];
1186 
1187     // If this value is locally known to be non-null, we're good
1188     if (isKnownNonZero(RetVal, DL))
1189       continue;
1190 
1191     // Otherwise, we need to look upwards since we can't make any local
1192     // conclusions.
1193     Instruction *RVI = dyn_cast<Instruction>(RetVal);
1194     if (!RVI)
1195       return false;
1196     switch (RVI->getOpcode()) {
1197     // Extend the analysis by looking upwards.
1198     case Instruction::BitCast:
1199     case Instruction::GetElementPtr:
1200     case Instruction::AddrSpaceCast:
1201       FlowsToReturn.insert(RVI->getOperand(0));
1202       continue;
1203     case Instruction::Select: {
1204       SelectInst *SI = cast<SelectInst>(RVI);
1205       FlowsToReturn.insert(SI->getTrueValue());
1206       FlowsToReturn.insert(SI->getFalseValue());
1207       continue;
1208     }
1209     case Instruction::PHI: {
1210       PHINode *PN = cast<PHINode>(RVI);
1211       for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1212         FlowsToReturn.insert(PN->getIncomingValue(i));
1213       continue;
1214     }
1215     case Instruction::Call:
1216     case Instruction::Invoke: {
1217       CallBase &CB = cast<CallBase>(*RVI);
1218       Function *Callee = CB.getCalledFunction();
1219       // A call to a node within the SCC is assumed to return null until
1220       // proven otherwise
1221       if (Callee && SCCNodes.count(Callee)) {
1222         Speculative = true;
1223         continue;
1224       }
1225       return false;
1226     }
1227     default:
1228       return false; // Unknown source, may be null
1229     };
1230     llvm_unreachable("should have either continued or returned");
1231   }
1232 
1233   return true;
1234 }
1235 
1236 /// Deduce nonnull attributes for the SCC.
1237 static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
1238                             SmallSet<Function *, 8> &Changed) {
1239   // Speculative that all functions in the SCC return only nonnull
1240   // pointers.  We may refute this as we analyze functions.
1241   bool SCCReturnsNonNull = true;
1242 
1243   // Check each function in turn, determining which functions return nonnull
1244   // pointers.
1245   for (Function *F : SCCNodes) {
1246     // Already nonnull.
1247     if (F->getAttributes().hasRetAttr(Attribute::NonNull))
1248       continue;
1249 
1250     // We can infer and propagate function attributes only when we know that the
1251     // definition we'll get at link time is *exactly* the definition we see now.
1252     // For more details, see GlobalValue::mayBeDerefined.
1253     if (!F->hasExactDefinition())
1254       return;
1255 
1256     // We annotate nonnull return values, which are only applicable to
1257     // pointer types.
1258     if (!F->getReturnType()->isPointerTy())
1259       continue;
1260 
1261     bool Speculative = false;
1262     if (isReturnNonNull(F, SCCNodes, Speculative)) {
1263       if (!Speculative) {
1264         // Mark the function eagerly since we may discover a function
1265         // which prevents us from speculating about the entire SCC
1266         LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
1267                           << " as nonnull\n");
1268         F->addRetAttr(Attribute::NonNull);
1269         ++NumNonNullReturn;
1270         Changed.insert(F);
1271       }
1272       continue;
1273     }
1274     // At least one function returns something which could be null, can't
1275     // speculate any more.
1276     SCCReturnsNonNull = false;
1277   }
1278 
1279   if (SCCReturnsNonNull) {
1280     for (Function *F : SCCNodes) {
1281       if (F->getAttributes().hasRetAttr(Attribute::NonNull) ||
1282           !F->getReturnType()->isPointerTy())
1283         continue;
1284 
1285       LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
1286       F->addRetAttr(Attribute::NonNull);
1287       ++NumNonNullReturn;
1288       Changed.insert(F);
1289     }
1290   }
1291 }
1292 
1293 namespace {
1294 
/// Collects a set of attribute inference requests and performs them all in one
/// go on a single SCC Node. Inference involves scanning function bodies
/// looking for instructions that violate attribute assumptions.
/// As soon as all the bodies are fine we are free to set the attribute.
/// Customization of inference for individual attributes is performed by
/// providing a handful of predicates for each attribute.
class AttributeInferer {
public:
  /// Describes a request for inference of a single attribute.
  struct InferenceDescriptor {

    /// Returns true if this function does not have to be handled.
    /// General intent for this predicate is to provide an optimization
    /// for functions that do not need this attribute inference at all
    /// (say, for functions that already have the attribute).
    std::function<bool(const Function &)> SkipFunction;

    /// Returns true if this instruction violates attribute assumptions.
    std::function<bool(Instruction &)> InstrBreaksAttribute;

    /// Sets the inferred attribute for this function.
    std::function<void(Function &)> SetAttribute;

    /// Attribute we derive.
    Attribute::AttrKind AKind;

    /// If true, only "exact" definitions can be used to infer this attribute.
    /// See GlobalValue::isDefinitionExact.
    bool RequiresExactDefinition;

    InferenceDescriptor(Attribute::AttrKind AK,
                        std::function<bool(const Function &)> SkipFunc,
                        std::function<bool(Instruction &)> InstrScan,
                        std::function<void(Function &)> SetAttr,
                        bool ReqExactDef)
        : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
          SetAttribute(SetAttr), AKind(AK),
          RequiresExactDefinition(ReqExactDef) {}
  };

private:
  /// All inference requests registered so far; consumed by run().
  SmallVector<InferenceDescriptor, 4> InferenceDescriptors;

public:
  /// Queue \p AttrInference to be performed by the next run() call.
  void registerAttrInference(InferenceDescriptor AttrInference) {
    InferenceDescriptors.push_back(AttrInference);
  }

  /// Perform all registered inferences over \p SCCNodes; functions whose
  /// attributes are modified are added to \p Changed.
  void run(const SCCNodeSet &SCCNodes, SmallSet<Function *, 8> &Changed);
};
1345 
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
///
/// Works by elimination: InferInSCC starts with every registered descriptor
/// and descriptors are dropped as soon as any function/instruction in the SCC
/// violates their assumptions. Whatever survives the scan is applied to all
/// non-skipped functions at the end.
void AttributeInferer::run(const SCCNodeSet &SCCNodes,
                           SmallSet<Function *, 8> &Changed) {
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      // All descriptors for this function eliminated - stop scanning early.
      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return;

  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only functions that were either:
    //   - explicitly skipped from scan/inference, or
    //   - verified to have no instructions that break attribute assumptions.
    // Hence we just go and force the attribute for all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed.insert(F);
      ID.SetAttribute(*F);
    }
}
1415 
/// Aggregate returned by the SCC-node collection step: the set of functions
/// forming the SCC plus a flag about call analyzability.
/// NOTE(review): HasUnknownCall is populated by code outside this view —
/// confirm its exact meaning (presumably "some call target could not be
/// identified") at the site that fills it in.
struct SCCNodesResult {
  SCCNodeSet SCCNodes;
  bool HasUnknownCall;
};
1420 
1421 } // end anonymous namespace
1422 
1423 /// Helper for non-Convergent inference predicate InstrBreaksAttribute.
1424 static bool InstrBreaksNonConvergent(Instruction &I,
1425                                      const SCCNodeSet &SCCNodes) {
1426   const CallBase *CB = dyn_cast<CallBase>(&I);
1427   // Breaks non-convergent assumption if CS is a convergent call to a function
1428   // not in the SCC.
1429   return CB && CB->isConvergent() &&
1430          !SCCNodes.contains(CB->getCalledFunction());
1431 }
1432 
1433 /// Helper for NoUnwind inference predicate InstrBreaksAttribute.
1434 static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
1435   if (!I.mayThrow())
1436     return false;
1437   if (const auto *CI = dyn_cast<CallInst>(&I)) {
1438     if (Function *Callee = CI->getCalledFunction()) {
1439       // I is a may-throw call to a function inside our SCC. This doesn't
1440       // invalidate our current working assumption that the SCC is no-throw; we
1441       // just have to scan that other function.
1442       if (SCCNodes.contains(Callee))
1443         return false;
1444     }
1445   }
1446   return true;
1447 }
1448 
1449 /// Helper for NoFree inference predicate InstrBreaksAttribute.
1450 static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
1451   CallBase *CB = dyn_cast<CallBase>(&I);
1452   if (!CB)
1453     return false;
1454 
1455   if (CB->hasFnAttr(Attribute::NoFree))
1456     return false;
1457 
1458   // Speculatively assume in SCC.
1459   if (Function *Callee = CB->getCalledFunction())
1460     if (SCCNodes.contains(Callee))
1461       return false;
1462 
1463   return true;
1464 }
1465 
1466 /// Attempt to remove convergent function attribute when possible.
1467 ///
1468 /// Returns true if any changes to function attributes were made.
1469 static void inferConvergent(const SCCNodeSet &SCCNodes,
1470                             SmallSet<Function *, 8> &Changed) {
1471   AttributeInferer AI;
1472 
1473   // Request to remove the convergent attribute from all functions in the SCC
1474   // if every callsite within the SCC is not convergent (except for calls
1475   // to functions within the SCC).
1476   // Note: Removal of the attr from the callsites will happen in
1477   // InstCombineCalls separately.
1478   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
1479       Attribute::Convergent,
1480       // Skip non-convergent functions.
1481       [](const Function &F) { return !F.isConvergent(); },
1482       // Instructions that break non-convergent assumption.
1483       [SCCNodes](Instruction &I) {
1484         return InstrBreaksNonConvergent(I, SCCNodes);
1485       },
1486       [](Function &F) {
1487         LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
1488                           << "\n");
1489         F.setNotConvergent();
1490       },
1491       /* RequiresExactDefinition= */ false});
1492   // Perform all the requested attribute inference actions.
1493   AI.run(SCCNodes, Changed);
1494 }
1495 
/// Infer attributes from all functions in the SCC by scanning every
/// instruction for compliance to the attribute assumptions.
/// Currently registers two inference requests (each gated by its own
/// command-line flag):
///   - addition of NoUnwind attribute
///   - addition of NoFree attribute
///
/// Returns true if any changes to function attributes were made.
static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
                                         SmallSet<Function *, 8> &Changed) {
  AttributeInferer AI;

  if (!DisableNoUnwindInference)
    // Request to infer nounwind attribute for all the functions in the SCC if
    // every callsite within the SCC is not throwing (except for calls to
    // functions within the SCC). Note that nounwind attribute suffers from
    // derefinement - results may change depending on how functions are
    // optimized. Thus it can be inferred only from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoUnwind,
        // Skip non-throwing functions.
        [](const Function &F) { return F.doesNotThrow(); },
        // Instructions that break non-throwing assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNonThrowing(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nounwind attr to fn " << F.getName() << "\n");
          F.setDoesNotThrow();
          ++NumNoUnwind;
        },
        /* RequiresExactDefinition= */ true});

  if (!DisableNoFreeInference)
    // Request to infer nofree attribute for all the functions in the SCC if
    // every callsite within the SCC does not directly or indirectly free
    // memory (except for calls to functions within the SCC). Note that nofree
    // attribute suffers from derefinement - results may change depending on
    // how functions are optimized. Thus it can be inferred only from exact
    // definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoFree,
        // Skip functions known not to free memory.
        [](const Function &F) { return F.doesNotFreeMemory(); },
        // Instructions that break non-deallocating assumption.
        [&SCCNodes](Instruction &I) {
          return InstrBreaksNoFree(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nofree attr to fn " << F.getName() << "\n");
          F.setDoesNotFreeMemory();
          ++NumNoFree;
        },
        /* RequiresExactDefinition= */ true});

  // Perform all the requested attribute inference actions.
  AI.run(SCCNodes, Changed);
}
1554 
1555 static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
1556                               SmallSet<Function *, 8> &Changed) {
1557   // Try and identify functions that do not recurse.
1558 
1559   // If the SCC contains multiple nodes we know for sure there is recursion.
1560   if (SCCNodes.size() != 1)
1561     return;
1562 
1563   Function *F = *SCCNodes.begin();
1564   if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
1565     return;
1566 
1567   // If all of the calls in F are identifiable and are to norecurse functions, F
1568   // is norecurse. This check also detects self-recursion as F is not currently
1569   // marked norecurse, so any called from F to F will not be marked norecurse.
1570   for (auto &BB : *F)
1571     for (auto &I : BB.instructionsWithoutDebug())
1572       if (auto *CB = dyn_cast<CallBase>(&I)) {
1573         Function *Callee = CB->getCalledFunction();
1574         if (!Callee || Callee == F || !Callee->doesNotRecurse())
1575           // Function calls a potentially recursive function.
1576           return;
1577       }
1578 
1579   // Every call was to a non-recursive function other than this function, and
1580   // we have no indirect recursion as the SCC size is one. This function cannot
1581   // recurse.
1582   F->setDoesNotRecurse();
1583   ++NumNoRecurse;
1584   Changed.insert(F);
1585 }
1586 
1587 static bool instructionDoesNotReturn(Instruction &I) {
1588   if (auto *CB = dyn_cast<CallBase>(&I))
1589     return CB->hasFnAttr(Attribute::NoReturn);
1590   return false;
1591 }
1592 
1593 // A basic block can only return if it terminates with a ReturnInst and does not
1594 // contain calls to noreturn functions.
1595 static bool basicBlockCanReturn(BasicBlock &BB) {
1596   if (!isa<ReturnInst>(BB.getTerminator()))
1597     return false;
1598   return none_of(BB, instructionDoesNotReturn);
1599 }
1600 
1601 // Set the noreturn function attribute if possible.
1602 static void addNoReturnAttrs(const SCCNodeSet &SCCNodes,
1603                              SmallSet<Function *, 8> &Changed) {
1604   for (Function *F : SCCNodes) {
1605     if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
1606         F->doesNotReturn())
1607       continue;
1608 
1609     // The function can return if any basic blocks can return.
1610     // FIXME: this doesn't handle recursion or unreachable blocks.
1611     if (none_of(*F, basicBlockCanReturn)) {
1612       F->setDoesNotReturn();
1613       Changed.insert(F);
1614     }
1615   }
1616 }
1617 
1618 static bool functionWillReturn(const Function &F) {
1619   // We can infer and propagate function attributes only when we know that the
1620   // definition we'll get at link time is *exactly* the definition we see now.
1621   // For more details, see GlobalValue::mayBeDerefined.
1622   if (!F.hasExactDefinition())
1623     return false;
1624 
1625   // Must-progress function without side-effects must return.
1626   if (F.mustProgress() && F.onlyReadsMemory())
1627     return true;
1628 
1629   // Can only analyze functions with a definition.
1630   if (F.isDeclaration())
1631     return false;
1632 
1633   // Functions with loops require more sophisticated analysis, as the loop
1634   // may be infinite. For now, don't try to handle them.
1635   SmallVector<std::pair<const BasicBlock *, const BasicBlock *>> Backedges;
1636   FindFunctionBackedges(F, Backedges);
1637   if (!Backedges.empty())
1638     return false;
1639 
1640   // If there are no loops, then the function is willreturn if all calls in
1641   // it are willreturn.
1642   return all_of(instructions(F), [](const Instruction &I) {
1643     return I.willReturn();
1644   });
1645 }
1646 
1647 // Set the willreturn function attribute if possible.
1648 static void addWillReturn(const SCCNodeSet &SCCNodes,
1649                           SmallSet<Function *, 8> &Changed) {
1650   for (Function *F : SCCNodes) {
1651     if (!F || F->willReturn() || !functionWillReturn(*F))
1652       continue;
1653 
1654     F->setWillReturn();
1655     NumWillReturn++;
1656     Changed.insert(F);
1657   }
1658 }
1659 
1660 // Return true if this is an atomic which has an ordering stronger than
1661 // unordered.  Note that this is different than the predicate we use in
1662 // Attributor.  Here we chose to be conservative and consider monotonic
1663 // operations potentially synchronizing.  We generally don't do much with
1664 // monotonic operations, so this is simply risk reduction.
1665 static bool isOrderedAtomic(Instruction *I) {
1666   if (!I->isAtomic())
1667     return false;
1668 
1669   if (auto *FI = dyn_cast<FenceInst>(I))
1670     // All legal orderings for fence are stronger than monotonic.
1671     return FI->getSyncScopeID() != SyncScope::SingleThread;
1672   else if (isa<AtomicCmpXchgInst>(I) || isa<AtomicRMWInst>(I))
1673     return true;
1674   else if (auto *SI = dyn_cast<StoreInst>(I))
1675     return !SI->isUnordered();
1676   else if (auto *LI = dyn_cast<LoadInst>(I))
1677     return !LI->isUnordered();
1678   else {
1679     llvm_unreachable("unknown atomic instruction?");
1680   }
1681 }
1682 
1683 static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
1684   // Volatile may synchronize
1685   if (I.isVolatile())
1686     return true;
1687 
1688   // An ordered atomic may synchronize.  (See comment about on monotonic.)
1689   if (isOrderedAtomic(&I))
1690     return true;
1691 
1692   auto *CB = dyn_cast<CallBase>(&I);
1693   if (!CB)
1694     // Non call site cases covered by the two checks above
1695     return false;
1696 
1697   if (CB->hasFnAttr(Attribute::NoSync))
1698     return false;
1699 
1700   // Non volatile memset/memcpy/memmoves are nosync
1701   // NOTE: Only intrinsics with volatile flags should be handled here.  All
1702   // others should be marked in Intrinsics.td.
1703   if (auto *MI = dyn_cast<MemIntrinsic>(&I))
1704     if (!MI->isVolatile())
1705       return false;
1706 
1707   // Speculatively assume in SCC.
1708   if (Function *Callee = CB->getCalledFunction())
1709     if (SCCNodes.contains(Callee))
1710       return false;
1711 
1712   return true;
1713 }
1714 
// Infer the nosync attribute.
//
// Registers a single inference rule with AttributeInferer: a function is
// nosync when none of its instructions trips InstrBreaksNoSync (volatile
// access, ordered atomic, or a call that may synchronize).
static void addNoSyncAttr(const SCCNodeSet &SCCNodes,
                          SmallSet<Function *, 8> &Changed) {
  AttributeInferer AI;
  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
      Attribute::NoSync,
      // Skip already marked functions.
      [](const Function &F) { return F.hasNoSync(); },
      // Instructions that break nosync assumption.
      [&SCCNodes](Instruction &I) {
        return InstrBreaksNoSync(I, SCCNodes);
      },
      // Attribute setter, invoked once inference succeeds for a function.
      [](Function &F) {
        LLVM_DEBUG(dbgs()
                   << "Adding nosync attr to fn " << F.getName() << "\n");
        F.setNoSync();
        ++NumNoSync;
      },
      /* RequiresExactDefinition= */ true});
  AI.run(SCCNodes, Changed);
}
1736 
1737 static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
1738   SCCNodesResult Res;
1739   Res.HasUnknownCall = false;
1740   for (Function *F : Functions) {
1741     if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked) ||
1742         F->isPresplitCoroutine()) {
1743       // Treat any function we're trying not to optimize as if it were an
1744       // indirect call and omit it from the node set used below.
1745       Res.HasUnknownCall = true;
1746       continue;
1747     }
1748     // Track whether any functions in this SCC have an unknown call edge.
1749     // Note: if this is ever a performance hit, we can common it with
1750     // subsequent routines which also do scans over the instructions of the
1751     // function.
1752     if (!Res.HasUnknownCall) {
1753       for (Instruction &I : instructions(*F)) {
1754         if (auto *CB = dyn_cast<CallBase>(&I)) {
1755           if (!CB->getCalledFunction()) {
1756             Res.HasUnknownCall = true;
1757             break;
1758           }
1759         }
1760       }
1761     }
1762     Res.SCCNodes.insert(F);
1763   }
1764   return Res;
1765 }
1766 
/// Shared driver for both pass managers: run every attribute inference over
/// the given SCC and return the set of functions whose attributes changed.
template <typename AARGetterT>
static SmallSet<Function *, 8>
deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter) {
  // Filter down to analyzable functions and note any unknown call edges.
  SCCNodesResult Nodes = createSCCNodeSet(Functions);

  // Bail if the SCC only contains optnone functions.
  if (Nodes.SCCNodes.empty())
    return {};

  // Accumulates every function modified by the inferences below.
  SmallSet<Function *, 8> Changed;

  // These inferences run regardless of unknown call edges.
  addArgumentReturnedAttrs(Nodes.SCCNodes, Changed);
  addReadAttrs(Nodes.SCCNodes, AARGetter, Changed);
  addArgumentAttrs(Nodes.SCCNodes, Changed);
  inferConvergent(Nodes.SCCNodes, Changed);
  addNoReturnAttrs(Nodes.SCCNodes, Changed);
  addWillReturn(Nodes.SCCNodes, Changed);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!Nodes.HasUnknownCall) {
    addNoAliasAttrs(Nodes.SCCNodes, Changed);
    addNonNullAttrs(Nodes.SCCNodes, Changed);
    inferAttrsFromFunctionBodies(Nodes.SCCNodes, Changed);
    addNoRecurseAttrs(Nodes.SCCNodes, Changed);
  }

  addNoSyncAttr(Nodes.SCCNodes, Changed);

  // Finally, infer the maximal set of attributes from the ones we've inferred
  // above.  This is handling the cases where one attribute on a signature
  // implies another, but for implementation reasons the inference rule for
  // the later is missing (or simply less sophisticated).
  for (Function *F : Nodes.SCCNodes)
    if (F)
      if (inferAttributesFromOthers(*F))
        Changed.insert(F);

  return Changed;
}
1807 
/// New pass manager entry point: deduce attributes for the SCC, then
/// invalidate only the analyses of functions actually affected.
PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Collect the SCC's functions for the shared deduction driver.
  SmallVector<Function *, 8> Functions;
  for (LazyCallGraph::Node &N : C) {
    Functions.push_back(&N.getFunction());
  }

  // If nothing changed, every analysis remains valid.
  auto ChangedFunctions = deriveAttrsInPostOrder(Functions, AARGetter);
  if (ChangedFunctions.empty())
    return PreservedAnalyses::all();

  // Invalidate analyses for modified functions so that we don't have to
  // invalidate all analyses for all functions in this SCC.
  PreservedAnalyses FuncPA;
  // We haven't changed the CFG for modified functions.
  FuncPA.preserveSet<CFGAnalyses>();
  for (Function *Changed : ChangedFunctions) {
    FAM.invalidate(*Changed, FuncPA);
    // Also invalidate any direct callers of changed functions since analyses
    // may care about attributes of direct callees. For example, MemorySSA cares
    // about whether or not a call's callee modifies memory and queries that
    // through function attributes.
    for (auto *U : Changed->users()) {
      if (auto *Call = dyn_cast<CallBase>(U)) {
        if (Call->getCalledFunction() == Changed)
          FAM.invalidate(*Call->getFunction(), FuncPA);
      }
    }
  }

  PreservedAnalyses PA;
  // We have not added or removed functions.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We already invalidated all relevant function analyses above.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}
1856 
namespace {

/// Legacy pass manager wrapper around the post-order function-attrs
/// deduction (see deriveAttrsInPostOrder).
struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Attribute-only changes leave the CFG intact.
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    // Declare the alias-analysis dependencies used via LegacyAARGetter.
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
1879 
// Register the legacy "function-attrs" pass and its analysis dependencies.
char PostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "function-attrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "function-attrs",
                    "Deduce function attributes", false, false)
1887 
/// Factory for the legacy post-order function-attrs pass.
Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}
1891 
1892 template <typename AARGetterT>
1893 static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
1894   SmallVector<Function *, 8> Functions;
1895   for (CallGraphNode *I : SCC) {
1896     Functions.push_back(I->getFunction());
1897   }
1898 
1899   return !deriveAttrsInPostOrder(Functions, AARGetter).empty();
1900 }
1901 
1902 bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
1903   if (skipSCC(SCC))
1904     return false;
1905   return runImpl(SCC, LegacyAARGetter(*this));
1906 }
1907 
namespace {

/// Legacy pass manager wrapper around the top-down (RPO) norecurse
/// deduction (see deduceFunctionAttributeInRPO).
struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  // Pass identification, replacement for typeid
  static char ID;

  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Attribute-only changes leave the CFG intact.
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};

} // end anonymous namespace
1929 
// Register the legacy "rpo-function-attrs" pass and its dependency on the
// call graph.
char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
                      "rpo-function-attrs", "Deduce function attributes in RPO",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
                    "rpo-function-attrs", "Deduce function attributes in RPO",
                    false, false)
1939 
/// Factory for the legacy RPO function-attrs pass.
Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}
1943 
1944 static bool addNoRecurseAttrsTopDown(Function &F) {
1945   // We check the preconditions for the function prior to calling this to avoid
1946   // the cost of building up a reversible post-order list. We assert them here
1947   // to make sure none of the invariants this relies on were violated.
1948   assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
1949   assert(!F.doesNotRecurse() &&
1950          "This function has already been deduced as norecurs!");
1951   assert(F.hasInternalLinkage() &&
1952          "Can only do top-down deduction for internal linkage functions!");
1953 
1954   // If F is internal and all of its uses are calls from a non-recursive
1955   // functions, then none of its calls could in fact recurse without going
1956   // through a function marked norecurse, and so we can mark this function too
1957   // as norecurse. Note that the uses must actually be calls -- otherwise
1958   // a pointer to this function could be returned from a norecurse function but
1959   // this function could be recursively (indirectly) called. Note that this
1960   // also detects if F is directly recursive as F is not yet marked as
1961   // a norecurse function.
1962   for (auto *U : F.users()) {
1963     auto *I = dyn_cast<Instruction>(U);
1964     if (!I)
1965       return false;
1966     CallBase *CB = dyn_cast<CallBase>(I);
1967     if (!CB || !CB->getParent()->getParent()->doesNotRecurse())
1968       return false;
1969   }
1970   F.setDoesNotRecurse();
1971   ++NumNoRecurse;
1972   return true;
1973 }
1974 
1975 static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
1976   // We only have a post-order SCC traversal (because SCCs are inherently
1977   // discovered in post-order), so we accumulate them in a vector and then walk
1978   // it in reverse. This is simpler than using the RPO iterator infrastructure
1979   // because we need to combine SCC detection and the PO walk of the call
1980   // graph. We can also cheat egregiously because we're primarily interested in
1981   // synthesizing norecurse and so we can only save the singular SCCs as SCCs
1982   // with multiple functions in them will clearly be recursive.
1983   SmallVector<Function *, 16> Worklist;
1984   for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
1985     if (I->size() != 1)
1986       continue;
1987 
1988     Function *F = I->front()->getFunction();
1989     if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
1990         F->hasInternalLinkage())
1991       Worklist.push_back(F);
1992   }
1993 
1994   bool Changed = false;
1995   for (auto *F : llvm::reverse(Worklist))
1996     Changed |= addNoRecurseAttrsTopDown(*F);
1997 
1998   return Changed;
1999 }
2000 
2001 bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
2002   if (skipModule(M))
2003     return false;
2004 
2005   auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2006 
2007   return deduceFunctionAttributeInRPO(M, CG);
2008 }
2009 
2010 PreservedAnalyses
2011 ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
2012   auto &CG = AM.getResult<CallGraphAnalysis>(M);
2013 
2014   if (!deduceFunctionAttributeInRPO(M, CG))
2015     return PreservedAnalyses::all();
2016 
2017   PreservedAnalyses PA;
2018   PA.preserve<CallGraphAnalysis>();
2019   return PA;
2020 }
2021