//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

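// For illustration (derived from the macros above),
// STATS_DECLTRACK_ARG_ATTR(returned) expands roughly to:
//
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }
//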
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr as well.
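/// For example, for `%v = load i32, i32* %p` this returns `%p`, while for a
/// volatile load it only does so if \p AllowVolatile is true (an illustrative
/// sketch of the cases handled below).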
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
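///
/// For illustration (a sketch, assuming \p PtrElemTy is `{ i32, i32 }` and
/// \p Offset is 5): getGEPIndicesForOffset yields the indices [0, 1] with one
/// byte left over, so a GEP to the second field is created first, followed by
/// a byte-wise i8 GEP for the remaining offset and a final cast to \p ResTy.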
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
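///
/// For example, for `%v = select i1 %c, i32* %a, i32* %b` with a condition
/// that cannot be simplified, both `%a` and `%b` end up on the worklist and
/// are eventually reported through \p VisitValueCB (an illustrative sketch of
/// the common case).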
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA,
        IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());
  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          true, 32, StripCB))
    return false;
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
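///
/// For illustration, with AANonNull as \p AAType: the nonnull states of all
/// returned values are joined, and if any returned value cannot be assumed
/// nonnull, \p S collapses to a pessimistic fixpoint (a sketch of one
/// possible instantiation).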
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
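///
/// For example, an argument state is only as good as the join over all (known)
/// call site argument states; a callback call without an associated call site
/// argument position makes the clamping fail (an illustrative sketch).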
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and
  // we want to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());
  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument "
                    << "position: " << Pos
                    << ", CB arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &S)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if(a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
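  ///
  /// For example (treating each pair as the byte range
  /// [offset, offset + size)): {0, 8} and {4, 4} overlap, {0, 4} and {4, 4}
  /// do not, and any Unknown offset or size conservatively yields an overlap.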
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (OAS.getOffset() == OffsetAndSize::Unknown ||
        OAS.getSize() == OffsetAndSize::Unknown ||
        getOffset() == OffsetAndSize::Unknown ||
        getSize() == OffsetAndSize::Unknown)
      return true;

    // Check if the ranges [offset, offset + size) intersect.
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offsets or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};

/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED, if the state changed, UNCHANGED otherwise.
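  /// Note: re-adding an access that is already contained and unchanged by the
  /// combine below yields UNCHANGED.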
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      for (auto &Access : It.getSecond())
        if (!CB(Access, OAS == ItOAS))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

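  /// Translate the accesses known for the callee argument (\p CalleeAA) into
  /// this state, shifting their offsets by \p CallArgOffset at call site \p CB.
  /// For byval arguments only read accesses are propagated.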
  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  };

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");

      OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        if (CE->isCompare())
          return true;
        if (!CE->isGEPWithNoNotionalOverIndexing()) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(
                           CurPtr->getType()->getPointerElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, PtrOI, Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Check if the PHI is invariant (so far).
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
1226         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1227           UsrOI = PtrOI;
1228           Follow = true;
1229           return true;
1230         }
1231 
1232         // Check if the PHI operand is not dependent on the PHI itself.
1233         // TODO: This is not great as we look at the pointer type. However, it
1234         // is unclear where the Offset size comes from with typeless pointers.
1235         APInt Offset(
1236             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1237             0);
1238         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1239                                     DL, Offset, /* AllowNonInbounds */ true)) {
1240           if (Offset != PtrOI.Offset) {
1241             LLVM_DEBUG(dbgs()
1242                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1243                        << *CurPtr << " in " << *Usr << "\n");
1244             return false;
1245           }
1246           return HandlePassthroughUser(Usr, PtrOI, Follow);
1247         }
1248 
1249         // TODO: Approximate in case we know the direction of the recurrence.
1250         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1251                           << *CurPtr << " in " << *Usr << "\n");
1252         UsrOI = PtrOI;
1253         UsrOI.Offset = OffsetAndSize::Unknown;
1254         Follow = true;
1255         return true;
1256       }
1257 
1258       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1259         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1260                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1261                             LoadI->getType());
1262       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1263         if (StoreI->getValueOperand() == CurPtr) {
1264           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1265                             << *StoreI << "\n");
1266           return false;
1267         }
1268         bool UsedAssumedInformation = false;
1269         Optional<Value *> Content = A.getAssumedSimplified(
1270             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1271         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1272                             PtrOI.Offset, Changed,
1273                             StoreI->getValueOperand()->getType());
1274       }
1275       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1276         if (CB->isLifetimeStartOrEnd())
1277           return true;
1278         if (CB->isArgOperand(&U)) {
1279           unsigned ArgNo = CB->getArgOperandNo(&U);
1280           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1281               *this, IRPosition::callsite_argument(*CB, ArgNo),
1282               DepClassTy::REQUIRED);
1283           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1284                     Changed;
1285           return true;
1286         }
1287         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1288                           << "\n");
1289         // TODO: Allow some call uses
1290         return false;
1291       }
1292 
1293       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1294       return false;
1295     };
1296     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1297                            /* CheckBBLivenessOnly */ true))
1298       return indicatePessimisticFixpoint();
1299 
1300     LLVM_DEBUG({
1301       dbgs() << "Accesses by bin after update:\n";
1302       for (auto &It : AccessBins) {
1303         dbgs() << "[" << It.first.getOffset() << "-"
1304                << It.first.getOffset() + It.first.getSize()
1305                << "] : " << It.getSecond().size() << "\n";
1306         for (auto &Acc : It.getSecond()) {
1307           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1308                  << "\n";
1309           if (Acc.getLocalInst() != Acc.getRemoteInst())
1310             dbgs() << "     -->                         "
1311                    << *Acc.getRemoteInst() << "\n";
1312           if (!Acc.isWrittenValueYetUndetermined())
1313             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1314         }
1315       }
1316     });
1317 
1318     return Changed;
1319   }
1320 
1321   /// See AbstractAttribute::trackStatistics()
1322   void trackStatistics() const override {
1323     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1324   }
1325 };
1326 
1327 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1328   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1329       : AAPointerInfoImpl(IRP, A) {}
1330 
1331   /// See AbstractAttribute::updateImpl(...).
1332   ChangeStatus updateImpl(Attributor &A) override {
1333     return indicatePessimisticFixpoint();
1334   }
1335 
1336   /// See AbstractAttribute::trackStatistics()
1337   void trackStatistics() const override {
1338     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1339   }
1340 };
1341 
1342 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1343   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1344       : AAPointerInfoFloating(IRP, A) {}
1345 
1346   /// See AbstractAttribute::initialize(...).
1347   void initialize(Attributor &A) override {
1348     AAPointerInfoFloating::initialize(A);
1349     if (getAnchorScope()->isDeclaration())
1350       indicatePessimisticFixpoint();
1351   }
1352 
1353   /// See AbstractAttribute::trackStatistics()
1354   void trackStatistics() const override {
1355     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1356   }
1357 };
1358 
1359 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1360   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1361       : AAPointerInfoFloating(IRP, A) {}
1362 
1363   /// See AbstractAttribute::updateImpl(...).
1364   ChangeStatus updateImpl(Attributor &A) override {
1365     using namespace AA::PointerInfo;
1366     // We handle memory intrinsics explicitly, at least their first
1367     // (= destination) and second (= source) arguments, as we know how they
1368     // are accessed.
1369     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1370       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1371       int64_t LengthVal = OffsetAndSize::Unknown;
1372       if (Length)
1373         LengthVal = Length->getSExtValue();
1374       Value &Ptr = getAssociatedValue();
1375       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1376       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1377       if (ArgNo == 0) {
1378         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1379                      nullptr, LengthVal);
1380       } else if (ArgNo == 1) {
1381         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1382                      nullptr, LengthVal);
1383       } else {
1384         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1385                           << *MI << "\n");
1386         return indicatePessimisticFixpoint();
1387       }
1388       return Changed;
1389     }
1390 
1391     // TODO: Once we have call site specific value information we can provide
1392     //       call site specific liveness information and then it makes
1393     //       sense to specialize attributes for call site arguments instead of
1394     //       redirecting requests to the callee argument.
1395     Argument *Arg = getAssociatedArgument();
1396     if (!Arg)
1397       return indicatePessimisticFixpoint();
1398     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1399     auto &ArgAA =
1400         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1401     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1402   }
1403 
1404   /// See AbstractAttribute::trackStatistics()
1405   void trackStatistics() const override {
1406     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1407   }
1408 };
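
// Illustrative example (assumed IR): for a memcpy, the destination argument
// is modeled as a write and the source argument as a read of the constant
// length:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
//
// %dst receives an AK_WRITE access of size 16 at offset 0 and %src a matching
// AK_READ access; a non-constant length is recorded with an unknown size.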
1409 
1410 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1411   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1412       : AAPointerInfoFloating(IRP, A) {}
1413 
1414   /// See AbstractAttribute::trackStatistics()
1415   void trackStatistics() const override {
1416     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1417   }
1418 };
1419 
1420 /// -----------------------NoUnwind Function Attribute--------------------------
1421 
1422 struct AANoUnwindImpl : AANoUnwind {
1423   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1424 
1425   const std::string getAsStr() const override {
1426     return getAssumed() ? "nounwind" : "may-unwind";
1427   }
1428 
1429   /// See AbstractAttribute::updateImpl(...).
1430   ChangeStatus updateImpl(Attributor &A) override {
1431     auto Opcodes = {
1432         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1433         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1434         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1435 
1436     auto CheckForNoUnwind = [&](Instruction &I) {
1437       if (!I.mayThrow())
1438         return true;
1439 
1440       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1441         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1442             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1443         return NoUnwindAA.isAssumedNoUnwind();
1444       }
1445       return false;
1446     };
1447 
1448     bool UsedAssumedInformation = false;
1449     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1450                                    UsedAssumedInformation))
1451       return indicatePessimisticFixpoint();
1452 
1453     return ChangeStatus::UNCHANGED;
1454   }
1455 };
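
// Illustrative example (assumed IR): a function whose only potentially
// throwing instruction is a call to a callee that is itself assumed nounwind
// is deduced nounwind as well:
//
//   define i32 @caller(i32 %x) {
//     %r = call i32 @nounwind_callee(i32 %x)
//     ret i32 %r
//   }
//
// CheckForNoUnwind accepts the call through the callee's AANoUnwind state,
// so @caller keeps its assumed "nounwind" bit until a fixpoint is reached.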
1456 
1457 struct AANoUnwindFunction final : public AANoUnwindImpl {
1458   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1459       : AANoUnwindImpl(IRP, A) {}
1460 
1461   /// See AbstractAttribute::trackStatistics()
1462   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1463 };
1464 
1465 /// NoUnwind attribute deduction for call sites.
1466 struct AANoUnwindCallSite final : AANoUnwindImpl {
1467   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1468       : AANoUnwindImpl(IRP, A) {}
1469 
1470   /// See AbstractAttribute::initialize(...).
1471   void initialize(Attributor &A) override {
1472     AANoUnwindImpl::initialize(A);
1473     Function *F = getAssociatedFunction();
1474     if (!F || F->isDeclaration())
1475       indicatePessimisticFixpoint();
1476   }
1477 
1478   /// See AbstractAttribute::updateImpl(...).
1479   ChangeStatus updateImpl(Attributor &A) override {
1480     // TODO: Once we have call site specific value information we can provide
1481     //       call site specific liveness information and then it makes
1482     //       sense to specialize attributes for call site arguments instead of
1483     //       redirecting requests to the callee argument.
1484     Function *F = getAssociatedFunction();
1485     const IRPosition &FnPos = IRPosition::function(*F);
1486     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1487     return clampStateAndIndicateChange(getState(), FnAA.getState());
1488   }
1489 
1490   /// See AbstractAttribute::trackStatistics()
1491   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1492 };
1493 
1494 /// --------------------- Function Return Values -------------------------------
1495 
1496 /// "Attribute" that collects all potential returned values and the return
1497 /// instructions that they arise from.
1498 ///
1499 /// If there is a unique returned value R, the manifest method will:
1500 ///   - mark R with the "returned" attribute, if R is an argument.
1501 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1502 
1503   /// Mapping of values potentially returned by the associated function to the
1504   /// return instructions that might return them.
1505   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1506 
1507   /// State flags
1508   ///
1509   ///{
1510   bool IsFixed = false;
1511   bool IsValidState = true;
1512   ///}
1513 
1514 public:
1515   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1516       : AAReturnedValues(IRP, A) {}
1517 
1518   /// See AbstractAttribute::initialize(...).
1519   void initialize(Attributor &A) override {
1520     // Reset the state.
1521     IsFixed = false;
1522     IsValidState = true;
1523     ReturnedValues.clear();
1524 
1525     Function *F = getAssociatedFunction();
1526     if (!F || F->isDeclaration()) {
1527       indicatePessimisticFixpoint();
1528       return;
1529     }
1530     assert(!F->getReturnType()->isVoidTy() &&
1531            "Did not expect a void return type!");
1532 
1533     // The map from instruction opcodes to those instructions in the function.
1534     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1535 
1536     // Look through all arguments; if one is marked as returned, we are done.
1537     for (Argument &Arg : F->args()) {
1538       if (Arg.hasReturnedAttr()) {
1539         auto &ReturnInstSet = ReturnedValues[&Arg];
1540         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1541           for (Instruction *RI : *Insts)
1542             ReturnInstSet.insert(cast<ReturnInst>(RI));
1543 
1544         indicateOptimisticFixpoint();
1545         return;
1546       }
1547     }
1548 
1549     if (!A.isFunctionIPOAmendable(*F))
1550       indicatePessimisticFixpoint();
1551   }
1552 
1553   /// See AbstractAttribute::manifest(...).
1554   ChangeStatus manifest(Attributor &A) override;
1555 
1556   /// See AbstractAttribute::getState(...).
1557   AbstractState &getState() override { return *this; }
1558 
1559   /// See AbstractAttribute::getState(...).
1560   const AbstractState &getState() const override { return *this; }
1561 
1562   /// See AbstractAttribute::updateImpl(Attributor &A).
1563   ChangeStatus updateImpl(Attributor &A) override;
1564 
1565   llvm::iterator_range<iterator> returned_values() override {
1566     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1567   }
1568 
1569   llvm::iterator_range<const_iterator> returned_values() const override {
1570     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1571   }
1572 
1573   /// Return the number of potential return values, -1 if unknown.
1574   size_t getNumReturnValues() const override {
1575     return isValidState() ? ReturnedValues.size() : -1;
1576   }
1577 
1578   /// Return an assumed unique return value if a single candidate is found. If
1579   /// there cannot be one, return a nullptr. If it is not clear yet, return
1580   /// None.
1581   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1582 
1583   /// See AbstractState::checkForAllReturnedValues(...).
1584   bool checkForAllReturnedValuesAndReturnInsts(
1585       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1586       const override;
1587 
1588   /// Pretty print the attribute similar to the IR representation.
1589   const std::string getAsStr() const override;
1590 
1591   /// See AbstractState::isAtFixpoint().
1592   bool isAtFixpoint() const override { return IsFixed; }
1593 
1594   /// See AbstractState::isValidState().
1595   bool isValidState() const override { return IsValidState; }
1596 
1597   /// See AbstractState::indicateOptimisticFixpoint(...).
1598   ChangeStatus indicateOptimisticFixpoint() override {
1599     IsFixed = true;
1600     return ChangeStatus::UNCHANGED;
1601   }
1602 
1603   ChangeStatus indicatePessimisticFixpoint() override {
1604     IsFixed = true;
1605     IsValidState = false;
1606     return ChangeStatus::CHANGED;
1607   }
1608 };
1609 
1610 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1611   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1612 
1613   // Bookkeeping.
1614   assert(isValidState());
1615   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1616                   "Number of functions with known return values");
1617 
1618   // Check if we have an assumed unique return value that we could manifest.
1619   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1620 
1621   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1622     return Changed;
1623 
1624   // Bookkeeping.
1625   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1626                   "Number of functions with a unique return value");
1627   // If the assumed unique return value is an argument, annotate it.
1628   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1629     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1630             getAssociatedFunction()->getReturnType())) {
1631       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1632       Changed = IRAttribute::manifest(A);
1633     }
1634   }
1635   return Changed;
1636 }
1637 
1638 const std::string AAReturnedValuesImpl::getAsStr() const {
1639   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1640          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1641 }
1642 
1643 Optional<Value *>
1644 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1645   // If checkForAllReturnedValues provides a unique value, ignoring potential
1646   // undef values that can also be present, it is assumed to be the actual
1647   // return value and forwarded to the caller of this method. If there are
1648   // multiple, a nullptr is returned indicating there cannot be a unique
1649   // returned value.
1650   Optional<Value *> UniqueRV;
1651   Type *Ty = getAssociatedFunction()->getReturnType();
1652 
1653   auto Pred = [&](Value &RV) -> bool {
1654     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1655     return UniqueRV != Optional<Value *>(nullptr);
1656   };
1657 
1658   if (!A.checkForAllReturnedValues(Pred, *this))
1659     UniqueRV = nullptr;
1660 
1661   return UniqueRV;
1662 }
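
// Illustrative example (assumed IR): if every return instruction yields the
// same value, ignoring undef, that value is the assumed unique return value:
//
//   define i8* @id(i8* %p, i1 %c) {
//     br i1 %c, label %a, label %b
//   a:
//     ret i8* %p
//   b:
//     ret i8* %p
//   }
//
// getAssumedUniqueReturnValue yields %p here, and since %p is an argument,
// manifest() can annotate it with the "returned" attribute.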
1663 
1664 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1665     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1666     const {
1667   if (!isValidState())
1668     return false;
1669 
1670   // Check all returned values but ignore call sites as long as we have not
1671   // encountered an overdefined one during an update.
1672   for (auto &It : ReturnedValues) {
1673     Value *RV = It.first;
1674     if (!Pred(*RV, It.second))
1675       return false;
1676   }
1677 
1678   return true;
1679 }
1680 
1681 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1682   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1683 
1684   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1685                            bool) -> bool {
1686     bool UsedAssumedInformation = false;
1687     Optional<Value *> SimpleRetVal =
1688         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1689     if (!SimpleRetVal.hasValue())
1690       return true;
1691     if (!SimpleRetVal.getValue())
1692       return false;
1693     Value *RetVal = *SimpleRetVal;
1694     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1695            "Assumed returned value should be valid in function scope!");
1696     if (ReturnedValues[RetVal].insert(&Ret))
1697       Changed = ChangeStatus::CHANGED;
1698     return true;
1699   };
1700 
1701   auto ReturnInstCB = [&](Instruction &I) {
1702     ReturnInst &Ret = cast<ReturnInst>(I);
1703     return genericValueTraversal<ReturnInst>(
1704         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1705         &I);
1706   };
1707 
1708   // Discover returned values from all live return instructions in the
1709   // associated function.
1710   bool UsedAssumedInformation = false;
1711   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1712                                  UsedAssumedInformation))
1713     return indicatePessimisticFixpoint();
1714   return Changed;
1715 }
1716 
1717 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1718   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1719       : AAReturnedValuesImpl(IRP, A) {}
1720 
1721   /// See AbstractAttribute::trackStatistics()
1722   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1723 };
1724 
1725 /// Returned values information for call sites.
1726 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1727   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1728       : AAReturnedValuesImpl(IRP, A) {}
1729 
1730   /// See AbstractAttribute::initialize(...).
1731   void initialize(Attributor &A) override {
1732     // TODO: Once we have call site specific value information we can provide
1733     //       call site specific liveness information and then it makes
1734     //       sense to specialize attributes for call sites instead of
1735     //       redirecting requests to the callee.
1736     llvm_unreachable("Abstract attributes for returned values are not "
1737                      "supported for call sites yet!");
1738   }
1739 
1740   /// See AbstractAttribute::updateImpl(...).
1741   ChangeStatus updateImpl(Attributor &A) override {
1742     return indicatePessimisticFixpoint();
1743   }
1744 
1745   /// See AbstractAttribute::trackStatistics()
1746   void trackStatistics() const override {}
1747 };
1748 
1749 /// ------------------------ NoSync Function Attribute -------------------------
1750 
1751 struct AANoSyncImpl : AANoSync {
1752   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1753 
1754   const std::string getAsStr() const override {
1755     return getAssumed() ? "nosync" : "may-sync";
1756   }
1757 
1758   /// See AbstractAttribute::updateImpl(...).
1759   ChangeStatus updateImpl(Attributor &A) override;
1760 
1761   /// Helper function used to determine whether an instruction is non-relaxed
1762   /// atomic, i.e., an atomic instruction whose ordering is stronger than
1763   /// unordered or monotonic.
1764   static bool isNonRelaxedAtomic(Instruction *I);
1765 
1766   /// Helper function specific to intrinsics which are potentially volatile.
1767   static bool isNoSyncIntrinsic(Instruction *I);
1768 };
1769 
1770 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1771   if (!I->isAtomic())
1772     return false;
1773 
1774   if (auto *FI = dyn_cast<FenceInst>(I))
1775     // All legal orderings for fence are stronger than monotonic.
1776     return FI->getSyncScopeID() != SyncScope::SingleThread;
1777   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1778     // Unordered is not a legal ordering for cmpxchg.
1779     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1780             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1781   }
1782 
1783   AtomicOrdering Ordering;
1784   switch (I->getOpcode()) {
1785   case Instruction::AtomicRMW:
1786     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1787     break;
1788   case Instruction::Store:
1789     Ordering = cast<StoreInst>(I)->getOrdering();
1790     break;
1791   case Instruction::Load:
1792     Ordering = cast<LoadInst>(I)->getOrdering();
1793     break;
1794   default:
1795     llvm_unreachable(
1796         "New atomic operations need to be known in the attributor.");
1797   }
1798 
1799   return (Ordering != AtomicOrdering::Unordered &&
1800           Ordering != AtomicOrdering::Monotonic);
1801 }
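
// Illustrative classification (assumed IR): unordered and monotonic
// (relaxed) accesses are not treated as synchronizing, stronger orderings
// are:
//
//   %v0 = load atomic i32, i32* %p monotonic, align 4 ; relaxed, not sync
//   %v1 = load atomic i32, i32* %p acquire, align 4   ; non-relaxed, sync
//
// Fences are handled separately above: only single-thread fences are
// considered non-synchronizing.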
1802 
1803 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1804 /// which would be nosync except that they have a volatile flag.  All other
1805 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1806 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1807   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1808     return !MI->isVolatile();
1809   return false;
1810 }
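
// Illustrative example (assumed IR): only the volatile flag disqualifies a
// memory intrinsic here; the non-volatile form remains nosync:
//
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 8, i1 false) ; nosync
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 8, i1 true)  ; volatile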
1811 
1812 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1813 
1814   auto CheckRWInstForNoSync = [&](Instruction &I) {
1815     // We are looking for volatile instructions or non-relaxed atomics.
1816 
1817     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1818       if (CB->hasFnAttr(Attribute::NoSync))
1819         return true;
1820 
1821       if (isNoSyncIntrinsic(&I))
1822         return true;
1823 
1824       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1825           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1826       return NoSyncAA.isAssumedNoSync();
1827     }
1828 
1829     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1830       return true;
1831 
1832     return false;
1833   };
1834 
1835   auto CheckForNoSync = [&](Instruction &I) {
1836     // At this point we handled all read/write effects and they are all
1837     // nosync, so they can be skipped.
1838     if (I.mayReadOrWriteMemory())
1839       return true;
1840 
1841     // non-convergent and readnone imply nosync.
1842     return !cast<CallBase>(I).isConvergent();
1843   };
1844 
1845   bool UsedAssumedInformation = false;
1846   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1847                                           UsedAssumedInformation) ||
1848       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1849                                          UsedAssumedInformation))
1850     return indicatePessimisticFixpoint();
1851 
1852   return ChangeStatus::UNCHANGED;
1853 }
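
// Illustrative example (assumed IR): plain non-volatile, non-atomic memory
// operations do not prevent the nosync deduction:
//
//   define void @copy(i32* %d, i32* %s) {
//     %v = load i32, i32* %s
//     store i32 %v, i32* %d
//     ret void
//   }
//
// Both instructions pass CheckRWInstForNoSync and there are no call-like
// instructions, so @copy is deduced nosync.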
1854 
1855 struct AANoSyncFunction final : public AANoSyncImpl {
1856   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1857       : AANoSyncImpl(IRP, A) {}
1858 
1859   /// See AbstractAttribute::trackStatistics()
1860   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1861 };
1862 
1863 /// NoSync attribute deduction for call sites.
1864 struct AANoSyncCallSite final : AANoSyncImpl {
1865   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1866       : AANoSyncImpl(IRP, A) {}
1867 
1868   /// See AbstractAttribute::initialize(...).
1869   void initialize(Attributor &A) override {
1870     AANoSyncImpl::initialize(A);
1871     Function *F = getAssociatedFunction();
1872     if (!F || F->isDeclaration())
1873       indicatePessimisticFixpoint();
1874   }
1875 
1876   /// See AbstractAttribute::updateImpl(...).
1877   ChangeStatus updateImpl(Attributor &A) override {
1878     // TODO: Once we have call site specific value information we can provide
1879     //       call site specific liveness information and then it makes
1880     //       sense to specialize attributes for call site arguments instead of
1881     //       redirecting requests to the callee argument.
1882     Function *F = getAssociatedFunction();
1883     const IRPosition &FnPos = IRPosition::function(*F);
1884     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1885     return clampStateAndIndicateChange(getState(), FnAA.getState());
1886   }
1887 
1888   /// See AbstractAttribute::trackStatistics()
1889   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1890 };
1891 
1892 /// ------------------------ No-Free Attributes ----------------------------
1893 
1894 struct AANoFreeImpl : public AANoFree {
1895   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1896 
1897   /// See AbstractAttribute::updateImpl(...).
1898   ChangeStatus updateImpl(Attributor &A) override {
1899     auto CheckForNoFree = [&](Instruction &I) {
1900       const auto &CB = cast<CallBase>(I);
1901       if (CB.hasFnAttr(Attribute::NoFree))
1902         return true;
1903 
1904       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1905           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1906       return NoFreeAA.isAssumedNoFree();
1907     };
1908 
1909     bool UsedAssumedInformation = false;
1910     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1911                                            UsedAssumedInformation))
1912       return indicatePessimisticFixpoint();
1913     return ChangeStatus::UNCHANGED;
1914   }
1915 
1916   /// See AbstractAttribute::getAsStr().
1917   const std::string getAsStr() const override {
1918     return getAssumed() ? "nofree" : "may-free";
1919   }
1920 };
1921 
1922 struct AANoFreeFunction final : public AANoFreeImpl {
1923   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1924       : AANoFreeImpl(IRP, A) {}
1925 
1926   /// See AbstractAttribute::trackStatistics()
1927   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1928 };
1929 
1930 /// NoFree attribute deduction for call sites.
1931 struct AANoFreeCallSite final : AANoFreeImpl {
1932   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1933       : AANoFreeImpl(IRP, A) {}
1934 
1935   /// See AbstractAttribute::initialize(...).
1936   void initialize(Attributor &A) override {
1937     AANoFreeImpl::initialize(A);
1938     Function *F = getAssociatedFunction();
1939     if (!F || F->isDeclaration())
1940       indicatePessimisticFixpoint();
1941   }
1942 
1943   /// See AbstractAttribute::updateImpl(...).
1944   ChangeStatus updateImpl(Attributor &A) override {
1945     // TODO: Once we have call site specific value information we can provide
1946     //       call site specific liveness information and then it makes
1947     //       sense to specialize attributes for call site arguments instead of
1948     //       redirecting requests to the callee argument.
1949     Function *F = getAssociatedFunction();
1950     const IRPosition &FnPos = IRPosition::function(*F);
1951     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1952     return clampStateAndIndicateChange(getState(), FnAA.getState());
1953   }
1954 
1955   /// See AbstractAttribute::trackStatistics()
1956   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1957 };
1958 
1959 /// NoFree attribute for floating values.
1960 struct AANoFreeFloating : AANoFreeImpl {
1961   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1962       : AANoFreeImpl(IRP, A) {}
1963 
1964   /// See AbstractAttribute::trackStatistics()
1965   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
1966 
1967   /// See AbstractAttribute::updateImpl(...).
1968   ChangeStatus updateImpl(Attributor &A) override {
1969     const IRPosition &IRP = getIRPosition();
1970 
1971     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1972         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1973     if (NoFreeAA.isAssumedNoFree())
1974       return ChangeStatus::UNCHANGED;
1975 
1976     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1977     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1978       Instruction *UserI = cast<Instruction>(U.getUser());
1979       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1980         if (CB->isBundleOperand(&U))
1981           return false;
1982         if (!CB->isArgOperand(&U))
1983           return true;
1984         unsigned ArgNo = CB->getArgOperandNo(&U);
1985 
1986         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1987             *this, IRPosition::callsite_argument(*CB, ArgNo),
1988             DepClassTy::REQUIRED);
1989         return NoFreeArg.isAssumedNoFree();
1990       }
1991 
1992       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1993           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1994         Follow = true;
1995         return true;
1996       }
1997       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1998           isa<ReturnInst>(UserI))
1999         return true;
2000 
2001       // Unknown user.
2002       return false;
2003     };
2004     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2005       return indicatePessimisticFixpoint();
2006 
2007     return ChangeStatus::UNCHANGED;
2008   }
2009 };
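
// Illustrative example (assumed IR): the use walk above looks through GEPs,
// bitcasts, PHIs, and selects, and only gives up on unknown users:
//
//   define void @f(i8* %p) {
//     %g = getelementptr i8, i8* %p, i64 4 ; transparent use, Follow = true
//     store i8 0, i8* %g                   ; benign use
//     ret void
//   }
//
// No use of %p reaches a callee that might free it, so %p remains assumed
// nofree.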
2010 
2011 /// NoFree attribute for a function argument.
2012 struct AANoFreeArgument final : AANoFreeFloating {
2013   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2014       : AANoFreeFloating(IRP, A) {}
2015 
2016   /// See AbstractAttribute::trackStatistics()
2017   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2018 };
2019 
2020 /// NoFree attribute for call site arguments.
2021 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2022   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2023       : AANoFreeFloating(IRP, A) {}
2024 
2025   /// See AbstractAttribute::updateImpl(...).
2026   ChangeStatus updateImpl(Attributor &A) override {
2027     // TODO: Once we have call site specific value information we can provide
2028     //       call site specific liveness information and then it makes
2029     //       sense to specialize attributes for call site arguments instead of
2030     //       redirecting requests to the callee argument.
2031     Argument *Arg = getAssociatedArgument();
2032     if (!Arg)
2033       return indicatePessimisticFixpoint();
2034     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2035     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2036     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2037   }
2038 
2039   /// See AbstractAttribute::trackStatistics()
2040   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2041 };
2042 
2043 /// NoFree attribute for function return value.
2044 struct AANoFreeReturned final : AANoFreeFloating {
2045   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2046       : AANoFreeFloating(IRP, A) {
2047     llvm_unreachable("NoFree is not applicable to function returns!");
2048   }
2049 
2050   /// See AbstractAttribute::initialize(...).
2051   void initialize(Attributor &A) override {
2052     llvm_unreachable("NoFree is not applicable to function returns!");
2053   }
2054 
2055   /// See AbstractAttribute::updateImpl(...).
2056   ChangeStatus updateImpl(Attributor &A) override {
2057     llvm_unreachable("NoFree is not applicable to function returns!");
2058   }
2059 
2060   /// See AbstractAttribute::trackStatistics()
2061   void trackStatistics() const override {}
2062 };
2063 
2064 /// NoFree attribute deduction for a call site return value.
2065 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2066   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2067       : AANoFreeFloating(IRP, A) {}
2068 
2069   ChangeStatus manifest(Attributor &A) override {
2070     return ChangeStatus::UNCHANGED;
2071   }
2072   /// See AbstractAttribute::trackStatistics()
2073   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2074 };
2075 
2076 /// ------------------------ NonNull Argument Attribute ------------------------
2077 static int64_t getKnownNonNullAndDerefBytesForUse(
2078     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2079     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2080   TrackUse = false;
2081 
2082   const Value *UseV = U->get();
2083   if (!UseV->getType()->isPointerTy())
2084     return 0;
2085 
2086   // We need to follow common pointer manipulation uses to the accesses they
2087   // feed into. We can try to be smart and avoid looking through things we do
2088   // not want to handle for now, e.g., non-inbounds GEPs.
2089   if (isa<CastInst>(I)) {
2090     TrackUse = true;
2091     return 0;
2092   }
2093 
2094   if (isa<GetElementPtrInst>(I)) {
2095     TrackUse = true;
2096     return 0;
2097   }
2098 
2099   Type *PtrTy = UseV->getType();
2100   const Function *F = I->getFunction();
2101   bool NullPointerIsDefined =
2102       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2103   const DataLayout &DL = A.getInfoCache().getDL();
2104   if (const auto *CB = dyn_cast<CallBase>(I)) {
2105     if (CB->isBundleOperand(U)) {
2106       if (RetainedKnowledge RK = getKnowledgeFromUse(
2107               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2108         IsNonNull |=
2109             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2110         return RK.ArgValue;
2111       }
2112       return 0;
2113     }
2114 
2115     if (CB->isCallee(U)) {
2116       IsNonNull |= !NullPointerIsDefined;
2117       return 0;
2118     }
2119 
2120     unsigned ArgNo = CB->getArgOperandNo(U);
2121     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2122     // As long as we only use known information there is no need to track
2123     // dependences here.
2124     auto &DerefAA =
2125         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2126     IsNonNull |= DerefAA.isKnownNonNull();
2127     return DerefAA.getKnownDereferenceableBytes();
2128   }
2129 
2130   int64_t Offset;
2131   const Value *Base =
2132       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2133   if (Base) {
2134     if (Base == &AssociatedValue &&
2135         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2136       int64_t DerefBytes =
2137           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2138 
2139       IsNonNull |= !NullPointerIsDefined;
2140       return std::max(int64_t(0), DerefBytes);
2141     }
2142   }
2143 
2144   // Corner case when the offset is 0.
2145   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2146                                               /*AllowNonInbounds*/ true);
2147   if (Base) {
2148     if (Offset == 0 && Base == &AssociatedValue &&
2149         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2150       int64_t DerefBytes =
2151           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2152       IsNonNull |= !NullPointerIsDefined;
2153       return std::max(int64_t(0), DerefBytes);
2154     }
2155   }
2156 
2157   return 0;
2158 }
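
// Illustrative example (assumed IR): a direct load from an argument, in a
// function where the null pointer is not defined, makes the argument known
// nonnull and dereferenceable for the accessed size:
//
//   define i32 @deref(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }
//
// The base of the access is %p itself, so IsNonNull is set and 4 bytes are
// reported as known dereferenceable.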
2159 
2160 struct AANonNullImpl : AANonNull {
2161   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2162       : AANonNull(IRP, A),
2163         NullIsDefined(NullPointerIsDefined(
2164             getAnchorScope(),
2165             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2166 
2167   /// See AbstractAttribute::initialize(...).
2168   void initialize(Attributor &A) override {
2169     Value &V = getAssociatedValue();
2170     if (!NullIsDefined &&
2171         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2172                 /* IgnoreSubsumingPositions */ false, &A)) {
2173       indicateOptimisticFixpoint();
2174       return;
2175     }
2176 
2177     if (isa<ConstantPointerNull>(V)) {
2178       indicatePessimisticFixpoint();
2179       return;
2180     }
2181 
2182     AANonNull::initialize(A);
2183 
2184     bool CanBeNull, CanBeFreed;
2185     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2186                                          CanBeFreed)) {
2187       if (!CanBeNull) {
2188         indicateOptimisticFixpoint();
2189         return;
2190       }
2191     }
2192 
2193     if (isa<GlobalValue>(&getAssociatedValue())) {
2194       indicatePessimisticFixpoint();
2195       return;
2196     }
2197 
2198     if (Instruction *CtxI = getCtxI())
2199       followUsesInMBEC(*this, A, getState(), *CtxI);
2200   }
2201 
2202   /// See followUsesInMBEC
2203   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2204                        AANonNull::StateType &State) {
2205     bool IsNonNull = false;
2206     bool TrackUse = false;
2207     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2208                                        IsNonNull, TrackUse);
2209     State.setKnown(IsNonNull);
2210     return TrackUse;
2211   }
2212 
2213   /// See AbstractAttribute::getAsStr().
2214   const std::string getAsStr() const override {
2215     return getAssumed() ? "nonnull" : "may-null";
2216   }
2217 
2218   /// Flag to determine if the underlying value can be null and still allow
2219   /// valid accesses.
2220   const bool NullIsDefined;
2221 };
2222 
2223 /// NonNull attribute for a floating value.
2224 struct AANonNullFloating : public AANonNullImpl {
2225   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2226       : AANonNullImpl(IRP, A) {}
2227 
2228   /// See AbstractAttribute::updateImpl(...).
2229   ChangeStatus updateImpl(Attributor &A) override {
2230     const DataLayout &DL = A.getDataLayout();
2231 
2232     DominatorTree *DT = nullptr;
2233     AssumptionCache *AC = nullptr;
2234     InformationCache &InfoCache = A.getInfoCache();
2235     if (const Function *Fn = getAnchorScope()) {
2236       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2237       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2238     }
2239 
2240     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2241                             AANonNull::StateType &T, bool Stripped) -> bool {
2242       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2243                                              DepClassTy::REQUIRED);
2244       if (!Stripped && this == &AA) {
2245         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2246           T.indicatePessimisticFixpoint();
2247       } else {
2248         // Use abstract attribute information.
2249         const AANonNull::StateType &NS = AA.getState();
2250         T ^= NS;
2251       }
2252       return T.isValidState();
2253     };
2254 
2255     StateType T;
2256     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2257                                           VisitValueCB, getCtxI()))
2258       return indicatePessimisticFixpoint();
2259 
2260     return clampStateAndIndicateChange(getState(), T);
2261   }
2262 
2263   /// See AbstractAttribute::trackStatistics()
2264   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2265 };
2266 
2267 /// NonNull attribute for function return value.
2268 struct AANonNullReturned final
2269     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2270   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2271       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2272 
2273   /// See AbstractAttribute::getAsStr().
2274   const std::string getAsStr() const override {
2275     return getAssumed() ? "nonnull" : "may-null";
2276   }
2277 
2278   /// See AbstractAttribute::trackStatistics()
2279   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2280 };
2281 
2282 /// NonNull attribute for function argument.
2283 struct AANonNullArgument final
2284     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2285   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2286       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2287 
2288   /// See AbstractAttribute::trackStatistics()
2289   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2290 };
2291 
2292 struct AANonNullCallSiteArgument final : AANonNullFloating {
2293   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2294       : AANonNullFloating(IRP, A) {}
2295 
2296   /// See AbstractAttribute::trackStatistics()
2297   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2298 };
2299 
2300 /// NonNull attribute for a call site return position.
2301 struct AANonNullCallSiteReturned final
2302     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2303   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2304       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2305 
2306   /// See AbstractAttribute::trackStatistics()
2307   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2308 };
2309 
2310 /// ------------------------ No-Recurse Attributes ----------------------------
2311 
2312 struct AANoRecurseImpl : public AANoRecurse {
2313   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2314 
2315   /// See AbstractAttribute::getAsStr()
2316   const std::string getAsStr() const override {
2317     return getAssumed() ? "norecurse" : "may-recurse";
2318   }
2319 };
2320 
2321 struct AANoRecurseFunction final : AANoRecurseImpl {
2322   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2323       : AANoRecurseImpl(IRP, A) {}
2324 
2325   /// See AbstractAttribute::initialize(...).
2326   void initialize(Attributor &A) override {
2327     AANoRecurseImpl::initialize(A);
2328     if (const Function *F = getAnchorScope())
2329       if (A.getInfoCache().getSccSize(*F) != 1)
2330         indicatePessimisticFixpoint();
2331   }
2332 
2333   /// See AbstractAttribute::updateImpl(...).
2334   ChangeStatus updateImpl(Attributor &A) override {
2335 
2336     // If all live call sites are known to be no-recurse, we are as well.
2337     auto CallSitePred = [&](AbstractCallSite ACS) {
2338       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2339           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2340           DepClassTy::NONE);
2341       return NoRecurseAA.isKnownNoRecurse();
2342     };
2343     bool AllCallSitesKnown;
2344     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2345       // If we know all call sites and all are known no-recurse, we are done.
2346       // If all known call sites, which might not be all that exist, are known
2347       // to be no-recurse, we are not done but we can continue to assume
2348       // no-recurse. If one of the call sites we have not visited becomes
2349       // live, another update is triggered.
2350       if (AllCallSitesKnown)
2351         indicateOptimisticFixpoint();
2352       return ChangeStatus::UNCHANGED;
2353     }
2354 
2355     // If the above check does not hold anymore, we look at the calls.
2356     auto CheckForNoRecurse = [&](Instruction &I) {
2357       const auto &CB = cast<CallBase>(I);
2358       if (CB.hasFnAttr(Attribute::NoRecurse))
2359         return true;
2360 
2361       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2362           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2363       if (!NoRecurseAA.isAssumedNoRecurse())
2364         return false;
2365 
2366       // Recursion to the same function
2367       if (CB.getCalledFunction() == getAnchorScope())
2368         return false;
2369 
2370       return true;
2371     };
2372 
2373     bool UsedAssumedInformation = false;
2374     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2375                                            UsedAssumedInformation))
2376       return indicatePessimisticFixpoint();
2377     return ChangeStatus::UNCHANGED;
2378   }
2379 
2380   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2381 };
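
// Illustrative example (assumed IR): a function that forms its own SCC and
// only calls norecurse callees other than itself is deduced norecurse:
//
//   define void @leaf() {
//     call void @callee() ; @callee is known or assumed norecurse
//     ret void
//   }
//
// Alternatively, if every known call site of @leaf sits in a caller already
// known to be norecurse, @leaf reaches an optimistic fixpoint directly.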
2382 
2383 /// NoRecurse attribute deduction for call sites.
2384 struct AANoRecurseCallSite final : AANoRecurseImpl {
2385   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2386       : AANoRecurseImpl(IRP, A) {}
2387 
2388   /// See AbstractAttribute::initialize(...).
2389   void initialize(Attributor &A) override {
2390     AANoRecurseImpl::initialize(A);
2391     Function *F = getAssociatedFunction();
2392     if (!F || F->isDeclaration())
2393       indicatePessimisticFixpoint();
2394   }
2395 
2396   /// See AbstractAttribute::updateImpl(...).
2397   ChangeStatus updateImpl(Attributor &A) override {
2398     // TODO: Once we have call site specific value information we can provide
2399     //       call site specific liveness information and then it makes
2400     //       sense to specialize attributes for call site arguments instead of
2401     //       redirecting requests to the callee argument.
2402     Function *F = getAssociatedFunction();
2403     const IRPosition &FnPos = IRPosition::function(*F);
2404     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2405     return clampStateAndIndicateChange(getState(), FnAA.getState());
2406   }
2407 
2408   /// See AbstractAttribute::trackStatistics()
2409   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2410 };
2411 
2412 /// -------------------- Undefined-Behavior Attributes ------------------------
2413 
2414 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2415   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2416       : AAUndefinedBehavior(IRP, A) {}
2417 
2418   /// See AbstractAttribute::updateImpl(...).
2420   ChangeStatus updateImpl(Attributor &A) override {
2421     const size_t UBPrevSize = KnownUBInsts.size();
2422     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2423 
2424     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2425       // The LangRef now states that volatile stores are not UB, so skip them.
2426       if (I.isVolatile() && I.mayWriteToMemory())
2427         return true;
2428 
2429       // Skip instructions that are already saved.
2430       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2431         return true;
2432 
2433       // If we reach here, we know we have an instruction
2434       // that accesses memory through a pointer operand,
2435       // which getPointerOperand() should return to us.
2436       Value *PtrOp =
2437           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2438       assert(PtrOp &&
2439              "Expected pointer operand of memory accessing instruction");
2440 
2441       // Either we stopped and the appropriate action was taken,
2442       // or we got back a simplified value to continue.
2443       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2444       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2445         return true;
2446       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2447 
2448       // A memory access through a pointer is considered UB
2449       // only if the pointer is a constant null value.
2450       // TODO: Expand it to not only check constant values.
2451       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2452         AssumedNoUBInsts.insert(&I);
2453         return true;
2454       }
2455       const Type *PtrTy = PtrOpVal->getType();
2456 
2457       // Because we only consider instructions inside functions,
2458       // assume that a parent function exists.
2459       const Function *F = I.getFunction();
2460 
2461       // A memory access using a constant null pointer is only considered UB
2462       // if the null pointer is _not_ defined for the target platform.
2463       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2464         AssumedNoUBInsts.insert(&I);
2465       else
2466         KnownUBInsts.insert(&I);
2467       return true;
2468     };
2469 
2470     auto InspectBrInstForUB = [&](Instruction &I) {
2471       // A conditional branch instruction is considered UB if it has an
2472       // `undef` condition.
2473 
2474       // Skip instructions that are already saved.
2475       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2476         return true;
2477 
2478       // We know we have a branch instruction.
2479       auto *BrInst = cast<BranchInst>(&I);
2480 
2481       // Unconditional branches are never considered UB.
2482       if (BrInst->isUnconditional())
2483         return true;
2484 
2485       // Either we stopped and the appropriate action was taken,
2486       // or we got back a simplified value to continue.
2487       Optional<Value *> SimplifiedCond =
2488           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2489       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2490         return true;
2491       AssumedNoUBInsts.insert(&I);
2492       return true;
2493     };
2494 
2495     auto InspectCallSiteForUB = [&](Instruction &I) {
2496       // Check whether a call site always causes UB or not.
2497 
2498       // Skip instructions that are already saved.
2499       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2500         return true;
2501 
2502       // Check for nonnull and noundef argument attribute violations at each
2503       // call site.
2504       CallBase &CB = cast<CallBase>(I);
2505       Function *Callee = CB.getCalledFunction();
2506       if (!Callee)
2507         return true;
2508       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2509         // If the current argument is known to be simplified to a null pointer
2510         // and the corresponding argument position is known to have the nonnull
2511         // attribute, the argument is poison. Furthermore, if the argument is
2512         // poison and the position is known to have the noundef attribute, this
2513         // call site is considered UB.
2514         if (idx >= Callee->arg_size())
2515           break;
2516         Value *ArgVal = CB.getArgOperand(idx);
2517         if (!ArgVal)
2518           continue;
2519         // Here, we handle three cases.
2520         //   (1) Not having a value means it is dead. (We can replace the value
2521         //       with undef.)
2522         //   (2) Simplified to undef. The argument violates the noundef attribute.
2523         //   (3) Simplified to a null pointer that is known to be nonnull.
2524         //       The argument is poison and violates the noundef attribute.
2525         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2526         auto &NoUndefAA =
2527             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2528         if (!NoUndefAA.isKnownNoUndef())
2529           continue;
2530         bool UsedAssumedInformation = false;
2531         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2532             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2533         if (UsedAssumedInformation)
2534           continue;
2535         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2536           return true;
2537         if (!SimplifiedVal.hasValue() ||
2538             isa<UndefValue>(*SimplifiedVal.getValue())) {
2539           KnownUBInsts.insert(&I);
2540           continue;
2541         }
2542         if (!ArgVal->getType()->isPointerTy() ||
2543             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2544           continue;
2545         auto &NonNullAA =
2546             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2547         if (NonNullAA.isKnownNonNull())
2548           KnownUBInsts.insert(&I);
2549       }
2550       return true;
2551     };
2552 
2553     auto InspectReturnInstForUB =
2554         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2555           // Check if a return instruction always causes UB or not.
2556           // Note: It is guaranteed that the returned position of the anchor
2557           //       scope has the noundef attribute when this is called.
2558           //       We also ensure the return position is not "assumed dead",
2559           //       because then the returned value could have been simplified
2560           //       to `undef` in AAReturnedValues without the `noundef`
2561           //       attribute having been removed yet.
2562 
2563           // When the returned position has the noundef attribute, UB occurs in
2564           // the following cases.
2565           //   (1) Returned value is known to be undef.
2566           //   (2) The value is known to be a null pointer and the returned
2567           //       position has nonnull attribute (because the returned value is
2568           //       poison).
2569           bool FoundUB = false;
2570           if (isa<UndefValue>(V)) {
2571             FoundUB = true;
2572           } else {
2573             if (isa<ConstantPointerNull>(V)) {
2574               auto &NonNullAA = A.getAAFor<AANonNull>(
2575                   *this, IRPosition::returned(*getAnchorScope()),
2576                   DepClassTy::NONE);
2577               if (NonNullAA.isKnownNonNull())
2578                 FoundUB = true;
2579             }
2580           }
2581 
2582           if (FoundUB)
2583             for (ReturnInst *RI : RetInsts)
2584               KnownUBInsts.insert(RI);
2585           return true;
2586         };
2587 
2588     bool UsedAssumedInformation = false;
2589     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2590                               {Instruction::Load, Instruction::Store,
2591                                Instruction::AtomicCmpXchg,
2592                                Instruction::AtomicRMW},
2593                               UsedAssumedInformation,
2594                               /* CheckBBLivenessOnly */ true);
2595     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2596                               UsedAssumedInformation,
2597                               /* CheckBBLivenessOnly */ true);
2598     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2599                                       UsedAssumedInformation);
2600 
2601     // If the returned position of the anchor scope has the noundef attribute,
2602     // check all return instructions.
2603     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2604       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2605       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2606         auto &RetPosNoUndefAA =
2607             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2608         if (RetPosNoUndefAA.isKnownNoUndef())
2609           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2610                                                     *this);
2611       }
2612     }
2613 
2614     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2615         UBPrevSize != KnownUBInsts.size())
2616       return ChangeStatus::CHANGED;
2617     return ChangeStatus::UNCHANGED;
2618   }
2619 
2620   bool isKnownToCauseUB(Instruction *I) const override {
2621     return KnownUBInsts.count(I);
2622   }
2623 
2624   bool isAssumedToCauseUB(Instruction *I) const override {
2625     // In simple words, if an instruction is not in the set of instructions
2626     // assumed to _not_ cause UB, then it is assumed to cause UB (that
2627     // includes those in the KnownUBInsts set). The rest of the boilerplate
2628     // ensures that it is one of the instructions we test
2629     // for UB.
2630 
2631     switch (I->getOpcode()) {
2632     case Instruction::Load:
2633     case Instruction::Store:
2634     case Instruction::AtomicCmpXchg:
2635     case Instruction::AtomicRMW:
2636       return !AssumedNoUBInsts.count(I);
2637     case Instruction::Br: {
2638       auto BrInst = cast<BranchInst>(I);
2639       if (BrInst->isUnconditional())
2640         return false;
2641       return !AssumedNoUBInsts.count(I);
2642     }
2643     default:
2644       return false;
2645     }
2646     return false;
2647   }
2648 
2649   ChangeStatus manifest(Attributor &A) override {
2650     if (KnownUBInsts.empty())
2651       return ChangeStatus::UNCHANGED;
2652     for (Instruction *I : KnownUBInsts)
2653       A.changeToUnreachableAfterManifest(I);
2654     return ChangeStatus::CHANGED;
2655   }
2656 
2657   /// See AbstractAttribute::getAsStr()
2658   const std::string getAsStr() const override {
2659     return getAssumed() ? "undefined-behavior" : "no-ub";
2660   }
2661 
2662   /// Note: The correctness of this analysis depends on the fact that the
2663   /// following 2 sets will stop changing after some point.
2664   /// "Change" here means that their size changes.
2665   /// The size of each set is monotonically increasing
2666   /// (we only add items to them) and it is upper bounded by the number of
2667   /// instructions in the processed function (we can never save more
2668   /// elements in either set than this number). Hence, at some point,
2669   /// they will stop increasing.
2670   /// Consequently, at some point, both sets will have stopped
2671   /// changing, effectively making the analysis reach a fixpoint.
2672 
2673   /// Note: These 2 sets are disjoint and an instruction can be considered
2674   /// one of 3 things:
2675   /// 1) Known to cause UB (AAUndefinedBehavior could prove it); it is put in
2676   ///    the KnownUBInsts set.
2677   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2678   ///    has a reason to assume it).
2679   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2680   ///    could not find a reason to assume or prove that it can cause UB,
2681   ///    hence it assumes it doesn't. We have a set for these instructions
2682   ///    so that we don't reprocess them in every update.
2683   ///    Note however that instructions in this set may cause UB.
2684 
2685 protected:
2686   /// A set of all live instructions _known_ to cause UB.
2687   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2688 
2689 private:
2690   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2691   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2692 
2693   // Should be called on updates in which if we're processing an instruction
2694   // \p I that depends on a value \p V, one of the following has to happen:
2695   // - If the value is assumed, then stop.
2696   // - If the value is known but undef, then consider it UB.
2697   // - Otherwise, do specific processing with the simplified value.
2698   // We return None in the first 2 cases to signify that an appropriate
2699   // action was taken and the caller should stop.
2700   // Otherwise, we return the simplified value that the caller should
2701   // use for specific processing.
2702   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2703                                          Instruction *I) {
2704     bool UsedAssumedInformation = false;
2705     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2706         IRPosition::value(*V), *this, UsedAssumedInformation);
2707     if (!UsedAssumedInformation) {
2708       // Don't depend on assumed values.
2709       if (!SimplifiedV.hasValue()) {
2710         // If it is known (which we tested above) but it doesn't have a value,
2711         // then we can assume `undef` and hence the instruction is UB.
2712         KnownUBInsts.insert(I);
2713         return llvm::None;
2714       }
2715       if (!SimplifiedV.getValue())
2716         return nullptr;
2717       V = *SimplifiedV;
2718     }
2719     if (isa<UndefValue>(V)) {
2720       KnownUBInsts.insert(I);
2721       return llvm::None;
2722     }
2723     return V;
2724   }
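
  // Hedged usage sketch (illustrative only; mirrors the callers elsewhere in
  // this file; `processSimplified` is a placeholder, not a real helper):
  //
  //   Optional<Value *> SimpleV = stopOnUndefOrAssumed(A, V, I);
  //   if (!SimpleV.hasValue() || !SimpleV.getValue())
  //     return true; // Handled: assumed info was used or UB was recorded.
  //   processSimplified(*SimpleV);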
2725 };
2726 
2727 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2728   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2729       : AAUndefinedBehaviorImpl(IRP, A) {}
2730 
2731   /// See AbstractAttribute::trackStatistics()
2732   void trackStatistics() const override {
2733     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2734                "Number of instructions known to have UB");
2735     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2736         KnownUBInsts.size();
2737   }
2738 };
2739 
2740 /// ------------------------ Will-Return Attributes ----------------------------
2741 
2742 // Helper function that checks whether a function has any cycle which we don't
2743 // know if it is bounded or not.
2744 // Loops with maximum trip count are considered bounded, any other cycle not.
2745 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2746   ScalarEvolution *SE =
2747       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2748   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2749   // If either SCEV or LoopInfo is not available for the function, we assume
2750   // any cycle to be an unbounded cycle.
2751   // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2752   // SCCs. To detect whether there is a cycle, we only need to find the maximal ones.
2753   if (!SE || !LI) {
2754     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2755       if (SCCI.hasCycle())
2756         return true;
2757     return false;
2758   }
2759 
2760   // If there's irreducible control, the function may contain non-loop cycles.
2761   if (mayContainIrreducibleControl(F, LI))
2762     return true;
2763 
2764   // Any loop that does not have a max trip count is considered an unbounded cycle.
2765   for (auto *L : LI->getLoopsInPreorder()) {
2766     if (!SE->getSmallConstantMaxTripCount(L))
2767       return true;
2768   }
2769   return false;
2770 }
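
// Hedged illustration (example loops, not from the sources): SCEV can compute
// a small constant max trip count for
//   for (int i = 0; i < 8; ++i) { ... }
// so it counts as bounded, whereas a search loop such as
//   while (*p++ != 0) { ... }
// generally has no computable max trip count and makes this function return
// true.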
2771 
2772 struct AAWillReturnImpl : public AAWillReturn {
2773   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2774       : AAWillReturn(IRP, A) {}
2775 
2776   /// See AbstractAttribute::initialize(...).
2777   void initialize(Attributor &A) override {
2778     AAWillReturn::initialize(A);
2779 
2780     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2781       indicateOptimisticFixpoint();
2782       return;
2783     }
2784   }
2785 
2786   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2787   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2788     // Check for `mustprogress` in the scope and the associated function,
2789     // which might be different if this is a call site.
2790     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2791         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2792       return false;
2793 
2794     const auto &MemAA =
2795         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2796     if (!MemAA.isAssumedReadOnly())
2797       return false;
2798     if (KnownOnly && !MemAA.isKnownReadOnly())
2799       return false;
2800     if (!MemAA.isKnownReadOnly())
2801       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2802 
2803     return true;
2804   }
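
  // Hedged illustration (assumed IR): for a definition such as
  //   define i32 @f(i32* %p) mustprogress readonly { ... }
  // every execution must make progress, and since the function cannot write
  // memory the only way to satisfy that is to eventually return; hence
  // `willreturn` follows without further inspection of the body.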
2805 
2806   /// See AbstractAttribute::updateImpl(...).
2807   ChangeStatus updateImpl(Attributor &A) override {
2808     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2809       return ChangeStatus::UNCHANGED;
2810 
2811     auto CheckForWillReturn = [&](Instruction &I) {
2812       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2813       const auto &WillReturnAA =
2814           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2815       if (WillReturnAA.isKnownWillReturn())
2816         return true;
2817       if (!WillReturnAA.isAssumedWillReturn())
2818         return false;
2819       const auto &NoRecurseAA =
2820           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2821       return NoRecurseAA.isAssumedNoRecurse();
2822     };
2823 
2824     bool UsedAssumedInformation = false;
2825     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2826                                            UsedAssumedInformation))
2827       return indicatePessimisticFixpoint();
2828 
2829     return ChangeStatus::UNCHANGED;
2830   }
2831 
2832   /// See AbstractAttribute::getAsStr()
2833   const std::string getAsStr() const override {
2834     return getAssumed() ? "willreturn" : "may-noreturn";
2835   }
2836 };
2837 
2838 struct AAWillReturnFunction final : AAWillReturnImpl {
2839   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2840       : AAWillReturnImpl(IRP, A) {}
2841 
2842   /// See AbstractAttribute::initialize(...).
2843   void initialize(Attributor &A) override {
2844     AAWillReturnImpl::initialize(A);
2845 
2846     Function *F = getAnchorScope();
2847     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2848       indicatePessimisticFixpoint();
2849   }
2850 
2851   /// See AbstractAttribute::trackStatistics()
2852   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2853 };
2854 
2855 /// WillReturn attribute deduction for a call site.
2856 struct AAWillReturnCallSite final : AAWillReturnImpl {
2857   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2858       : AAWillReturnImpl(IRP, A) {}
2859 
2860   /// See AbstractAttribute::initialize(...).
2861   void initialize(Attributor &A) override {
2862     AAWillReturnImpl::initialize(A);
2863     Function *F = getAssociatedFunction();
2864     if (!F || !A.isFunctionIPOAmendable(*F))
2865       indicatePessimisticFixpoint();
2866   }
2867 
2868   /// See AbstractAttribute::updateImpl(...).
2869   ChangeStatus updateImpl(Attributor &A) override {
2870     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2871       return ChangeStatus::UNCHANGED;
2872 
2873     // TODO: Once we have call site specific value information we can provide
2874     //       call site specific liveness information and then it makes
2875     //       sense to specialize attributes for call site arguments instead of
2876     //       redirecting requests to the callee argument.
2877     Function *F = getAssociatedFunction();
2878     const IRPosition &FnPos = IRPosition::function(*F);
2879     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2880     return clampStateAndIndicateChange(getState(), FnAA.getState());
2881   }
2882 
2883   /// See AbstractAttribute::trackStatistics()
2884   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2885 };
2886 
2887 /// -------------------AAReachability Attribute--------------------------
2888 
2889 struct AAReachabilityImpl : AAReachability {
2890   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2891       : AAReachability(IRP, A) {}
2892 
2893   const std::string getAsStr() const override {
2894     // TODO: Return the number of reachable queries.
2895     return "reachable";
2896   }
2897 
2898   /// See AbstractAttribute::updateImpl(...).
2899   ChangeStatus updateImpl(Attributor &A) override {
2900     return ChangeStatus::UNCHANGED;
2901   }
2902 };
2903 
2904 struct AAReachabilityFunction final : public AAReachabilityImpl {
2905   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2906       : AAReachabilityImpl(IRP, A) {}
2907 
2908   /// See AbstractAttribute::trackStatistics()
2909   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2910 };
2911 
2912 /// ------------------------ NoAlias Argument Attribute ------------------------
2913 
2914 struct AANoAliasImpl : AANoAlias {
2915   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2916     assert(getAssociatedType()->isPointerTy() &&
2917            "Noalias is a pointer attribute");
2918   }
2919 
2920   const std::string getAsStr() const override {
2921     return getAssumed() ? "noalias" : "may-alias";
2922   }
2923 };
2924 
2925 /// NoAlias attribute for a floating value.
2926 struct AANoAliasFloating final : AANoAliasImpl {
2927   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2928       : AANoAliasImpl(IRP, A) {}
2929 
2930   /// See AbstractAttribute::initialize(...).
2931   void initialize(Attributor &A) override {
2932     AANoAliasImpl::initialize(A);
2933     Value *Val = &getAssociatedValue();
2934     do {
2935       CastInst *CI = dyn_cast<CastInst>(Val);
2936       if (!CI)
2937         break;
2938       Value *Base = CI->getOperand(0);
2939       if (!Base->hasOneUse())
2940         break;
2941       Val = Base;
2942     } while (true);
2943 
2944     if (!Val->getType()->isPointerTy()) {
2945       indicatePessimisticFixpoint();
2946       return;
2947     }
2948 
2949     if (isa<AllocaInst>(Val))
2950       indicateOptimisticFixpoint();
2951     else if (isa<ConstantPointerNull>(Val) &&
2952              !NullPointerIsDefined(getAnchorScope(),
2953                                    Val->getType()->getPointerAddressSpace()))
2954       indicateOptimisticFixpoint();
2955     else if (Val != &getAssociatedValue()) {
2956       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2957           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2958       if (ValNoAliasAA.isKnownNoAlias())
2959         indicateOptimisticFixpoint();
2960     }
2961   }
2962 
2963   /// See AbstractAttribute::updateImpl(...).
2964   ChangeStatus updateImpl(Attributor &A) override {
2965     // TODO: Implement this.
2966     return indicatePessimisticFixpoint();
2967   }
2968 
2969   /// See AbstractAttribute::trackStatistics()
2970   void trackStatistics() const override {
2971     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2972   }
2973 };
2974 
2975 /// NoAlias attribute for an argument.
2976 struct AANoAliasArgument final
2977     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2978   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2979   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2980 
2981   /// See AbstractAttribute::initialize(...).
2982   void initialize(Attributor &A) override {
2983     Base::initialize(A);
2984     // See callsite argument attribute and callee argument attribute.
2985     if (hasAttr({Attribute::ByVal}))
2986       indicateOptimisticFixpoint();
2987   }
2988 
2989   /// See AbstractAttribute::update(...).
2990   ChangeStatus updateImpl(Attributor &A) override {
2991     // We have to make sure no-alias on the argument does not break
2992     // synchronization when this is a callback argument, see also [1] below.
2993     // If synchronization cannot be affected, we delegate to the base updateImpl
2994     // function, otherwise we give up for now.
2995 
2996     // If the function is no-sync, no-alias cannot break synchronization.
2997     const auto &NoSyncAA =
2998         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2999                              DepClassTy::OPTIONAL);
3000     if (NoSyncAA.isAssumedNoSync())
3001       return Base::updateImpl(A);
3002 
3003     // If the argument is read-only, no-alias cannot break synchronization.
3004     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3005         *this, getIRPosition(), DepClassTy::OPTIONAL);
3006     if (MemBehaviorAA.isAssumedReadOnly())
3007       return Base::updateImpl(A);
3008 
3009     // If the argument is never passed through callbacks, no-alias cannot break
3010     // synchronization.
3011     bool AllCallSitesKnown;
3012     if (A.checkForAllCallSites(
3013             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3014             true, AllCallSitesKnown))
3015       return Base::updateImpl(A);
3016 
3017     // TODO: add no-alias but make sure it doesn't break synchronization by
3018     // introducing fake uses. See:
3019     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3020     //     International Workshop on OpenMP 2018,
3021     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3022 
3023     return indicatePessimisticFixpoint();
3024   }
3025 
3026   /// See AbstractAttribute::trackStatistics()
3027   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3028 };
3029 
3030 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3031   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3032       : AANoAliasImpl(IRP, A) {}
3033 
3034   /// See AbstractAttribute::initialize(...).
3035   void initialize(Attributor &A) override {
3036     // See callsite argument attribute and callee argument attribute.
3037     const auto &CB = cast<CallBase>(getAnchorValue());
3038     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3039       indicateOptimisticFixpoint();
3040     Value &Val = getAssociatedValue();
3041     if (isa<ConstantPointerNull>(Val) &&
3042         !NullPointerIsDefined(getAnchorScope(),
3043                               Val.getType()->getPointerAddressSpace()))
3044       indicateOptimisticFixpoint();
3045   }
3046 
3047   /// Determine if the underlying value may alias with the call site argument
3048   /// \p OtherArgNo of \p CB (= the underlying call site).
3049   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3050                             const AAMemoryBehavior &MemBehaviorAA,
3051                             const CallBase &CB, unsigned OtherArgNo) {
3052     // We do not need to worry about aliasing with the underlying IRP.
3053     if (this->getCalleeArgNo() == (int)OtherArgNo)
3054       return false;
3055 
3056     // If it is not a pointer or pointer vector we do not alias.
3057     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3058     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3059       return false;
3060 
3061     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3062         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3063 
3064     // If the argument is readnone, there is no read-write aliasing.
3065     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3066       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3067       return false;
3068     }
3069 
3070     // If the argument is readonly and the underlying value is readonly, there
3071     // is no read-write aliasing.
3072     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3073     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3074       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3075       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3076       return false;
3077     }
3078 
3079     // We have to use actual alias analysis queries, so we need the object.
3080     if (!AAR)
3081       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3082 
3083     // Try to rule it out at the call site.
3084     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3085     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3086                          "callsite arguments: "
3087                       << getAssociatedValue() << " " << *ArgOp << " => "
3088                       << (IsAliasing ? "" : "no-") << "alias \n");
3089 
3090     return IsAliasing;
3091   }
3092 
3093   bool
3094   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3095                                          const AAMemoryBehavior &MemBehaviorAA,
3096                                          const AANoAlias &NoAliasAA) {
3097     // We can deduce "noalias" if the following conditions hold.
3098     // (i)   Associated value is assumed to be noalias in the definition.
3099     // (ii)  Associated value is assumed to be no-capture in all the uses
3100     //       possibly executed before this callsite.
3101     // (iii) There is no other pointer argument which could alias with the
3102     //       value.
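    //
    // Hedged example (assumed IR): in
    //   %m = call noalias i8* @malloc(i64 4)
    //   call void @use(i8* %m, i8* %other)
    // %m satisfies (i); (ii) and (iii) additionally require that %m is not
    // captured before the call and that no other pointer argument, such as
    // %other, may alias it.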
3103 
3104     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3105     if (!AssociatedValueIsNoAliasAtDef) {
3106       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3107                         << " is not no-alias at the definition\n");
3108       return false;
3109     }
3110 
3111     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3112 
3113     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3114     const Function *ScopeFn = VIRP.getAnchorScope();
3115     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3116     // Check whether the value is captured in the scope using AANoCapture.
3117     // Look at the CFG and check only uses possibly executed before this
3118     // callsite.
3119     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3120       Instruction *UserI = cast<Instruction>(U.getUser());
3121 
3122       // If UserI is the current instruction and there is a single potential
3123       // use of the value in UserI, we allow the use.
3124       // TODO: We should inspect the operands and allow those that cannot alias
3125       //       with the value.
3126       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3127         return true;
3128 
3129       if (ScopeFn) {
3130         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3131             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3132 
3133         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3134           return true;
3135 
3136         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3137           if (CB->isArgOperand(&U)) {
3138 
3139             unsigned ArgNo = CB->getArgOperandNo(&U);
3140 
3141             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3142                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3143                 DepClassTy::OPTIONAL);
3144 
3145             if (NoCaptureAA.isAssumedNoCapture())
3146               return true;
3147           }
3148         }
3149       }
3150 
3151       // For cases that can potentially have more users, follow the uses.
3152       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3153           isa<SelectInst>(U)) {
3154         Follow = true;
3155         return true;
3156       }
3157 
3158       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3159       return false;
3160     };
3161 
3162     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3163       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3164         LLVM_DEBUG(
3165             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3166                    << " cannot be noalias as it is potentially captured\n");
3167         return false;
3168       }
3169     }
3170     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3171 
3172     // Check there is no other pointer argument which could alias with the
3173     // value passed at this call site.
3174     // TODO: AbstractCallSite
3175     const auto &CB = cast<CallBase>(getAnchorValue());
3176     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3177       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3178         return false;
3179 
3180     return true;
3181   }
3182 
3183   /// See AbstractAttribute::updateImpl(...).
3184   ChangeStatus updateImpl(Attributor &A) override {
3185     // If the argument is readnone we are done as there are no accesses via the
3186     // argument.
3187     auto &MemBehaviorAA =
3188         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3189     if (MemBehaviorAA.isAssumedReadNone()) {
3190       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3191       return ChangeStatus::UNCHANGED;
3192     }
3193 
3194     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3195     const auto &NoAliasAA =
3196         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3197 
3198     AAResults *AAR = nullptr;
3199     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3200                                                NoAliasAA)) {
3201       LLVM_DEBUG(
3202           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3203       return ChangeStatus::UNCHANGED;
3204     }
3205 
3206     return indicatePessimisticFixpoint();
3207   }
3208 
3209   /// See AbstractAttribute::trackStatistics()
3210   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3211 };
3212 
3213 /// NoAlias attribute for function return value.
3214 struct AANoAliasReturned final : AANoAliasImpl {
3215   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3216       : AANoAliasImpl(IRP, A) {}
3217 
3218   /// See AbstractAttribute::initialize(...).
3219   void initialize(Attributor &A) override {
3220     AANoAliasImpl::initialize(A);
3221     Function *F = getAssociatedFunction();
3222     if (!F || F->isDeclaration())
3223       indicatePessimisticFixpoint();
3224   }
3225 
3226   /// See AbstractAttribute::updateImpl(...).
3227   virtual ChangeStatus updateImpl(Attributor &A) override {
3228 
3229     auto CheckReturnValue = [&](Value &RV) -> bool {
3230       if (Constant *C = dyn_cast<Constant>(&RV))
3231         if (C->isNullValue() || isa<UndefValue>(C))
3232           return true;
3233 
3234       /// For now, we can only deduce noalias if we have call sites.
3235       /// FIXME: add more support.
3236       if (!isa<CallBase>(&RV))
3237         return false;
3238 
3239       const IRPosition &RVPos = IRPosition::value(RV);
3240       const auto &NoAliasAA =
3241           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3242       if (!NoAliasAA.isAssumedNoAlias())
3243         return false;
3244 
3245       const auto &NoCaptureAA =
3246           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3247       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3248     };
3249 
3250     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3251       return indicatePessimisticFixpoint();
3252 
3253     return ChangeStatus::UNCHANGED;
3254   }
3255 
3256   /// See AbstractAttribute::trackStatistics()
3257   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3258 };
3259 
3260 /// NoAlias attribute deduction for a call site return value.
3261 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3262   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3263       : AANoAliasImpl(IRP, A) {}
3264 
3265   /// See AbstractAttribute::initialize(...).
3266   void initialize(Attributor &A) override {
3267     AANoAliasImpl::initialize(A);
3268     Function *F = getAssociatedFunction();
3269     if (!F || F->isDeclaration())
3270       indicatePessimisticFixpoint();
3271   }
3272 
3273   /// See AbstractAttribute::updateImpl(...).
3274   ChangeStatus updateImpl(Attributor &A) override {
3275     // TODO: Once we have call site specific value information we can provide
3276     //       call site specific liveness information and then it makes
3277     //       sense to specialize attributes for call site arguments instead of
3278     //       redirecting requests to the callee argument.
3279     Function *F = getAssociatedFunction();
3280     const IRPosition &FnPos = IRPosition::returned(*F);
3281     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3282     return clampStateAndIndicateChange(getState(), FnAA.getState());
3283   }
3284 
3285   /// See AbstractAttribute::trackStatistics()
3286   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3287 };
3288 
3289 /// -------------------AAIsDead Function Attribute-----------------------
3290 
3291 struct AAIsDeadValueImpl : public AAIsDead {
3292   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3293 
3294   /// See AAIsDead::isAssumedDead().
3295   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3296 
3297   /// See AAIsDead::isKnownDead().
3298   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3299 
3300   /// See AAIsDead::isAssumedDead(BasicBlock *).
3301   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3302 
3303   /// See AAIsDead::isKnownDead(BasicBlock *).
3304   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3305 
3306   /// See AAIsDead::isAssumedDead(Instruction *I).
3307   bool isAssumedDead(const Instruction *I) const override {
3308     return I == getCtxI() && isAssumedDead();
3309   }
3310 
3311   /// See AAIsDead::isKnownDead(Instruction *I).
3312   bool isKnownDead(const Instruction *I) const override {
3313     return isAssumedDead(I) && isKnownDead();
3314   }
3315 
3316   /// See AbstractAttribute::getAsStr().
3317   const std::string getAsStr() const override {
3318     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3319   }
3320 
3321   /// Check if all uses are assumed dead.
3322   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3323     // Callers might not check the type; void has no uses.
3324     if (V.getType()->isVoidTy())
3325       return true;
3326 
3327     // If we replace a value with a constant there are no uses left afterwards.
3328     if (!isa<Constant>(V)) {
3329       bool UsedAssumedInformation = false;
3330       Optional<Constant *> C =
3331           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3332       if (!C.hasValue() || *C)
3333         return true;
3334     }
3335 
3336     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3337     // Explicitly set the dependence class to required because we want a long
3338     // chain of N dependent instructions to be considered live as soon as one is
3339     // without going through N update cycles. This is not required for
3340     // correctness.
3341     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3342                              DepClassTy::REQUIRED);
3343   }
3344 
3345   /// Determine if \p I is assumed to be side-effect free.
3346   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3347     if (!I || wouldInstructionBeTriviallyDead(I))
3348       return true;
3349 
3350     auto *CB = dyn_cast<CallBase>(I);
3351     if (!CB || isa<IntrinsicInst>(CB))
3352       return false;
3353 
3354     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3355     const auto &NoUnwindAA =
3356         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3357     if (!NoUnwindAA.isAssumedNoUnwind())
3358       return false;
3359     if (!NoUnwindAA.isKnownNoUnwind())
3360       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3361 
3362     const auto &MemBehaviorAA =
3363         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3364     if (MemBehaviorAA.isAssumedReadOnly()) {
3365       if (!MemBehaviorAA.isKnownReadOnly())
3366         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3367       return true;
3368     }
3369     return false;
3370   }
3371 };
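
  // Hedged illustration (assumed IR): a call such as
  //   %v = call i32 @pure(i32 %x)   ; callee assumed nounwind + readonly
  // passes the checks above; if all uses of %v are also assumed dead, the
  // floating AAIsDead can delete the call during manifest.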
3372 
3373 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3374   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3375       : AAIsDeadValueImpl(IRP, A) {}
3376 
3377   /// See AbstractAttribute::initialize(...).
3378   void initialize(Attributor &A) override {
3379     if (isa<UndefValue>(getAssociatedValue())) {
3380       indicatePessimisticFixpoint();
3381       return;
3382     }
3383 
3384     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3385     if (!isAssumedSideEffectFree(A, I)) {
3386       if (!isa_and_nonnull<StoreInst>(I))
3387         indicatePessimisticFixpoint();
3388       else
3389         removeAssumedBits(HAS_NO_EFFECT);
3390     }
3391   }
3392 
3393   bool isDeadStore(Attributor &A, StoreInst &SI) {
3394     // The LangRef now states that volatile stores are neither UB nor dead, so skip them.
3395     if (SI.isVolatile())
3396       return false;
3397 
3398     bool UsedAssumedInformation = false;
3399     SmallSetVector<Value *, 4> PotentialCopies;
3400     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3401                                              UsedAssumedInformation))
3402       return false;
3403     return llvm::all_of(PotentialCopies, [&](Value *V) {
3404       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3405                              UsedAssumedInformation);
3406     });
3407   }
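
  // Hedged illustration (assumed IR): in
  //   %a = alloca i32
  //   store i32 42, i32* %a
  //   %v = load i32, i32* %a   ; %v has no live uses
  // the load is the only potential copy of the stored value; if it is assumed
  // dead, the store is considered dead as well.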
3408 
3409   /// See AbstractAttribute::updateImpl(...).
3410   ChangeStatus updateImpl(Attributor &A) override {
3411     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3412     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3413       if (!isDeadStore(A, *SI))
3414         return indicatePessimisticFixpoint();
3415     } else {
3416       if (!isAssumedSideEffectFree(A, I))
3417         return indicatePessimisticFixpoint();
3418       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3419         return indicatePessimisticFixpoint();
3420     }
3421     return ChangeStatus::UNCHANGED;
3422   }
3423 
3424   /// See AbstractAttribute::manifest(...).
3425   ChangeStatus manifest(Attributor &A) override {
3426     Value &V = getAssociatedValue();
3427     if (auto *I = dyn_cast<Instruction>(&V)) {
3428       // If we get here we basically know the users are all dead. We check
3429       // whether isAssumedSideEffectFree still returns true because it might
3430       // not: then only the users are dead and the instruction (= a call) is
3431       // still needed.
3432       if (isa<StoreInst>(I) ||
3433           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3434         A.deleteAfterManifest(*I);
3435         return ChangeStatus::CHANGED;
3436       }
3437     }
3438     if (V.use_empty())
3439       return ChangeStatus::UNCHANGED;
3440 
3441     bool UsedAssumedInformation = false;
3442     Optional<Constant *> C =
3443         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3444     if (C.hasValue() && C.getValue())
3445       return ChangeStatus::UNCHANGED;
3446 
3447     // Replace the value with undef as it is dead but keep droppable uses around
3448     // as they provide information we don't want to give up on just yet.
3449     UndefValue &UV = *UndefValue::get(V.getType());
3450     bool AnyChange =
3451         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
3452     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3453   }
3454 
3455   /// See AbstractAttribute::trackStatistics()
3456   void trackStatistics() const override {
3457     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3458   }
3459 };
3460 
3461 struct AAIsDeadArgument : public AAIsDeadFloating {
3462   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3463       : AAIsDeadFloating(IRP, A) {}
3464 
3465   /// See AbstractAttribute::initialize(...).
3466   void initialize(Attributor &A) override {
3467     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3468       indicatePessimisticFixpoint();
3469   }
3470 
3471   /// See AbstractAttribute::manifest(...).
3472   ChangeStatus manifest(Attributor &A) override {
3473     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3474     Argument &Arg = *getAssociatedArgument();
3475     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3476       if (A.registerFunctionSignatureRewrite(
3477               Arg, /* ReplacementTypes */ {},
3478               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3479               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3480         Arg.dropDroppableUses();
3481         return ChangeStatus::CHANGED;
3482       }
3483     return Changed;
3484   }
3485 
3486   /// See AbstractAttribute::trackStatistics()
3487   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3488 };
3489 
3490 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3491   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3492       : AAIsDeadValueImpl(IRP, A) {}
3493 
3494   /// See AbstractAttribute::initialize(...).
3495   void initialize(Attributor &A) override {
3496     if (isa<UndefValue>(getAssociatedValue()))
3497       indicatePessimisticFixpoint();
3498   }
3499 
3500   /// See AbstractAttribute::updateImpl(...).
3501   ChangeStatus updateImpl(Attributor &A) override {
3502     // TODO: Once we have call site specific value information we can provide
3503     //       call site specific liveness information and then it makes
3504     //       sense to specialize attributes for call site arguments instead of
3505     //       redirecting requests to the callee argument.
3506     Argument *Arg = getAssociatedArgument();
3507     if (!Arg)
3508       return indicatePessimisticFixpoint();
3509     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3510     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3511     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3512   }
3513 
3514   /// See AbstractAttribute::manifest(...).
3515   ChangeStatus manifest(Attributor &A) override {
3516     CallBase &CB = cast<CallBase>(getAnchorValue());
3517     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3518     assert(!isa<UndefValue>(U.get()) &&
3519            "Expected undef values to be filtered out!");
3520     UndefValue &UV = *UndefValue::get(U->getType());
3521     if (A.changeUseAfterManifest(U, UV))
3522       return ChangeStatus::CHANGED;
3523     return ChangeStatus::UNCHANGED;
3524   }
3525 
3526   /// See AbstractAttribute::trackStatistics()
3527   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3528 };
3529 
3530 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3531   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3532       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3533 
3534   /// See AAIsDead::isAssumedDead().
3535   bool isAssumedDead() const override {
3536     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3537   }
3538 
3539   /// See AbstractAttribute::initialize(...).
3540   void initialize(Attributor &A) override {
3541     if (isa<UndefValue>(getAssociatedValue())) {
3542       indicatePessimisticFixpoint();
3543       return;
3544     }
3545 
3546     // We track this separately as a secondary state.
3547     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3548   }
3549 
3550   /// See AbstractAttribute::updateImpl(...).
3551   ChangeStatus updateImpl(Attributor &A) override {
3552     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3553     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3554       IsAssumedSideEffectFree = false;
3555       Changed = ChangeStatus::CHANGED;
3556     }
3557     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3558       return indicatePessimisticFixpoint();
3559     return Changed;
3560   }
3561 
3562   /// See AbstractAttribute::trackStatistics()
3563   void trackStatistics() const override {
3564     if (IsAssumedSideEffectFree)
3565       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3566     else
3567       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3568   }
3569 
3570   /// See AbstractAttribute::getAsStr().
3571   const std::string getAsStr() const override {
3572     return isAssumedDead()
3573                ? "assumed-dead"
3574                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3575   }
3576 
3577 private:
3578   bool IsAssumedSideEffectFree;
3579 };
3580 
3581 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3582   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3583       : AAIsDeadValueImpl(IRP, A) {}
3584 
3585   /// See AbstractAttribute::updateImpl(...).
3586   ChangeStatus updateImpl(Attributor &A) override {
3587 
3588     bool UsedAssumedInformation = false;
3589     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3590                               {Instruction::Ret}, UsedAssumedInformation);
3591 
3592     auto PredForCallSite = [&](AbstractCallSite ACS) {
3593       if (ACS.isCallbackCall() || !ACS.getInstruction())
3594         return false;
3595       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3596     };
3597 
3598     bool AllCallSitesKnown;
3599     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3600                                 AllCallSitesKnown))
3601       return indicatePessimisticFixpoint();
3602 
3603     return ChangeStatus::UNCHANGED;
3604   }
3605 
3606   /// See AbstractAttribute::manifest(...).
3607   ChangeStatus manifest(Attributor &A) override {
3608     // TODO: Rewrite the signature to return void?
3609     bool AnyChange = false;
3610     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3611     auto RetInstPred = [&](Instruction &I) {
3612       ReturnInst &RI = cast<ReturnInst>(I);
3613       if (!isa<UndefValue>(RI.getReturnValue()))
3614         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3615       return true;
3616     };
3617     bool UsedAssumedInformation = false;
3618     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3619                               UsedAssumedInformation);
3620     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3621   }
3622 
3623   /// See AbstractAttribute::trackStatistics()
3624   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3625 };
3626 
3627 struct AAIsDeadFunction : public AAIsDead {
3628   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3629 
3630   /// See AbstractAttribute::initialize(...).
3631   void initialize(Attributor &A) override {
3632     const Function *F = getAnchorScope();
3633     if (F && !F->isDeclaration()) {
3634       // We only want to compute liveness once. If the function is not part of
3635       // the SCC, skip it.
3636       if (A.isRunOn(*const_cast<Function *>(F))) {
3637         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3638         assumeLive(A, F->getEntryBlock());
3639       } else {
3640         indicatePessimisticFixpoint();
3641       }
3642     }
3643   }
3644 
3645   /// See AbstractAttribute::getAsStr().
3646   const std::string getAsStr() const override {
3647     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3648            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3649            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3650            std::to_string(KnownDeadEnds.size()) + "]";
3651   }
3652 
3653   /// See AbstractAttribute::manifest(...).
3654   ChangeStatus manifest(Attributor &A) override {
3655     assert(getState().isValidState() &&
3656            "Attempted to manifest an invalid state!");
3657 
3658     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3659     Function &F = *getAnchorScope();
3660 
3661     if (AssumedLiveBlocks.empty()) {
3662       A.deleteAfterManifest(F);
3663       return ChangeStatus::CHANGED;
3664     }
3665 
3666     // Flag to determine if we can change an invoke to a call assuming the
3667     // callee is nounwind. This is not possible if the personality of the
3668     // function allows catching asynchronous exceptions.
3669     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3670 
3671     KnownDeadEnds.set_union(ToBeExploredFrom);
3672     for (const Instruction *DeadEndI : KnownDeadEnds) {
3673       auto *CB = dyn_cast<CallBase>(DeadEndI);
3674       if (!CB)
3675         continue;
3676       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3677           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3678       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3679       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3680         continue;
3681 
3682       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3683         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3684       else
3685         A.changeToUnreachableAfterManifest(
3686             const_cast<Instruction *>(DeadEndI->getNextNode()));
3687       HasChanged = ChangeStatus::CHANGED;
3688     }
3689 
3690     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3691     for (BasicBlock &BB : F)
3692       if (!AssumedLiveBlocks.count(&BB)) {
3693         A.deleteAfterManifest(BB);
3694         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3695       }
3696 
3697     return HasChanged;
3698   }
3699 
3700   /// See AbstractAttribute::updateImpl(...).
3701   ChangeStatus updateImpl(Attributor &A) override;
3702 
3703   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3704     return !AssumedLiveEdges.count(std::make_pair(From, To));
3705   }
3706 
3707   /// See AbstractAttribute::trackStatistics()
3708   void trackStatistics() const override {}
3709 
3710   /// Returns true if the function is assumed dead.
3711   bool isAssumedDead() const override { return false; }
3712 
3713   /// See AAIsDead::isKnownDead().
3714   bool isKnownDead() const override { return false; }
3715 
3716   /// See AAIsDead::isAssumedDead(BasicBlock *).
3717   bool isAssumedDead(const BasicBlock *BB) const override {
3718     assert(BB->getParent() == getAnchorScope() &&
3719            "BB must be in the same anchor scope function.");
3720 
3721     if (!getAssumed())
3722       return false;
3723     return !AssumedLiveBlocks.count(BB);
3724   }
3725 
3726   /// See AAIsDead::isKnownDead(BasicBlock *).
3727   bool isKnownDead(const BasicBlock *BB) const override {
3728     return getKnown() && isAssumedDead(BB);
3729   }
3730 
3731   /// See AAIsDead::isAssumedDead(Instruction *I).
3732   bool isAssumedDead(const Instruction *I) const override {
3733     assert(I->getParent()->getParent() == getAnchorScope() &&
3734            "Instruction must be in the same anchor scope function.");
3735 
3736     if (!getAssumed())
3737       return false;
3738 
3739     // If it is not in AssumedLiveBlocks then it is for sure dead.
3740     // Otherwise, it can still be after a noreturn call in a live block.
3741     if (!AssumedLiveBlocks.count(I->getParent()))
3742       return true;
3743 
3744     // If it is not after a liveness barrier it is live.
3745     const Instruction *PrevI = I->getPrevNode();
3746     while (PrevI) {
3747       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3748         return true;
3749       PrevI = PrevI->getPrevNode();
3750     }
3751     return false;
3752   }
3753 
3754   /// See AAIsDead::isKnownDead(Instruction *I).
3755   bool isKnownDead(const Instruction *I) const override {
3756     return getKnown() && isAssumedDead(I);
3757   }
3758 
3759   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3760   /// that internal functions called from \p BB should now be looked at.
3761   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3762     if (!AssumedLiveBlocks.insert(&BB).second)
3763       return false;
3764 
3765     // We assume that all of BB is (probably) live now and if there are calls to
3766     // internal functions we will assume that those are now live as well. This
3767     // is a performance optimization for blocks with calls to a lot of internal
3768     // functions. It can however cause dead functions to be treated as live.
3769     for (const Instruction &I : BB)
3770       if (const auto *CB = dyn_cast<CallBase>(&I))
3771         if (const Function *F = CB->getCalledFunction())
3772           if (F->hasLocalLinkage())
3773             A.markLiveInternalFunction(*F);
3774     return true;
3775   }
3776 
3777   /// Collection of instructions that need to be explored again, e.g., we
3778   /// did assume they do not transfer control to (one of their) successors.
3779   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3780 
3781   /// Collection of instructions that are known to not transfer control.
3782   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3783 
3784   /// Collection of all assumed live edges.
3785   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3786 
3787   /// Collection of all assumed live BasicBlocks.
3788   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3789 };
3790 
3791 static bool
3792 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3793                         AbstractAttribute &AA,
3794                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3795   const IRPosition &IPos = IRPosition::callsite_function(CB);
3796 
3797   const auto &NoReturnAA =
3798       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3799   if (NoReturnAA.isAssumedNoReturn())
3800     return !NoReturnAA.isKnownNoReturn();
3801   if (CB.isTerminator())
3802     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3803   else
3804     AliveSuccessors.push_back(CB.getNextNode());
3805   return false;
3806 }
3807 
3808 static bool
3809 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3810                         AbstractAttribute &AA,
3811                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3812   bool UsedAssumedInformation =
3813       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3814 
3815   // First, determine if we can change an invoke to a call assuming the
3816   // callee is nounwind. This is not possible if the personality of the
3817   // function allows catching asynchronous exceptions.
3818   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3819     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3820   } else {
3821     const IRPosition &IPos = IRPosition::callsite_function(II);
3822     const auto &AANoUnw =
3823         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3824     if (AANoUnw.isAssumedNoUnwind()) {
3825       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3826     } else {
3827       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3828     }
3829   }
3830   return UsedAssumedInformation;
3831 }
3832 
3833 static bool
3834 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3835                         AbstractAttribute &AA,
3836                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3837   bool UsedAssumedInformation = false;
3838   if (BI.getNumSuccessors() == 1) {
3839     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3840   } else {
3841     Optional<Constant *> C =
3842         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3843     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3844       // No value yet, assume both edges are dead.
3845     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3846       const BasicBlock *SuccBB =
3847           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3848       AliveSuccessors.push_back(&SuccBB->front());
3849     } else {
3850       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3851       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3852       UsedAssumedInformation = false;
3853     }
3854   }
3855   return UsedAssumedInformation;
3856 }
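
// Hedged illustration (assumed IR): if the branch condition simplifies to a
// known constant, e.g.,
//   br i1 true, label %taken, label %untaken
// only the first instruction of %taken is recorded as an alive successor and
// the edge to %untaken remains dead.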
3857 
3858 static bool
3859 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3860                         AbstractAttribute &AA,
3861                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3862   bool UsedAssumedInformation = false;
3863   Optional<Constant *> C =
3864       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3865   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3866     // No value yet, assume all edges are dead.
3867   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3868     for (auto &CaseIt : SI.cases()) {
3869       if (CaseIt.getCaseValue() == C.getValue()) {
3870         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3871         return UsedAssumedInformation;
3872       }
3873     }
3874     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3875     return UsedAssumedInformation;
3876   } else {
3877     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3878       AliveSuccessors.push_back(&SuccBB->front());
3879   }
3880   return UsedAssumedInformation;
3881 }
3882 
3883 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3884   ChangeStatus Change = ChangeStatus::UNCHANGED;
3885 
3886   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3887                     << getAnchorScope()->size() << "] BBs and "
3888                     << ToBeExploredFrom.size() << " exploration points and "
3889                     << KnownDeadEnds.size() << " known dead ends\n");
3890 
3891   // Copy and clear the list of instructions we need to explore from. It is
3892   // refilled with instructions the next update has to look at.
3893   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3894                                                ToBeExploredFrom.end());
3895   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3896 
3897   SmallVector<const Instruction *, 8> AliveSuccessors;
3898   while (!Worklist.empty()) {
3899     const Instruction *I = Worklist.pop_back_val();
3900     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3901 
3902     // Fast forward over uninteresting instructions. We could look for UB
3903     // here, though.
3904     while (!I->isTerminator() && !isa<CallBase>(I))
3905       I = I->getNextNode();
3906 
3907     AliveSuccessors.clear();
3908 
3909     bool UsedAssumedInformation = false;
3910     switch (I->getOpcode()) {
3911     // TODO: look for (assumed) UB to backwards propagate "deadness".
3912     default:
3913       assert(I->isTerminator() &&
3914              "Expected non-terminators to be handled already!");
3915       for (const BasicBlock *SuccBB : successors(I->getParent()))
3916         AliveSuccessors.push_back(&SuccBB->front());
3917       break;
3918     case Instruction::Call:
3919       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3920                                                        *this, AliveSuccessors);
3921       break;
3922     case Instruction::Invoke:
3923       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3924                                                        *this, AliveSuccessors);
3925       break;
3926     case Instruction::Br:
3927       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3928                                                        *this, AliveSuccessors);
3929       break;
3930     case Instruction::Switch:
3931       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3932                                                        *this, AliveSuccessors);
3933       break;
3934     }
3935 
3936     if (UsedAssumedInformation) {
3937       NewToBeExploredFrom.insert(I);
3938     } else if (AliveSuccessors.empty() ||
3939                (I->isTerminator() &&
3940                 AliveSuccessors.size() < I->getNumSuccessors())) {
3941       if (KnownDeadEnds.insert(I))
3942         Change = ChangeStatus::CHANGED;
3943     }
3944 
3945     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3946                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3947                       << UsedAssumedInformation << "\n");
3948 
3949     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3950       if (!I->isTerminator()) {
3951         assert(AliveSuccessors.size() == 1 &&
3952                "Non-terminator expected to have a single successor!");
3953         Worklist.push_back(AliveSuccessor);
3954       } else {
3955         // Record the assumed live edge.
3956         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3957         if (AssumedLiveEdges.insert(Edge).second)
3958           Change = ChangeStatus::CHANGED;
3959         if (assumeLive(A, *AliveSuccessor->getParent()))
3960           Worklist.push_back(AliveSuccessor);
3961       }
3962     }
3963   }
3964 
3965   // Check if the content of ToBeExploredFrom changed, ignoring the order.
3966   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3967       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3968         return !ToBeExploredFrom.count(I);
3969       })) {
3970     Change = ChangeStatus::CHANGED;
3971     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3972   }
3973 
3974   // If we know everything is live there is no need to query for liveness.
3975   // Instead, indicating a pessimistic fixpoint will cause the state to be
3976   // "invalid" and all queries to be answered conservatively without lookups.
3977   // To be in this state we have to (1) have finished the exploration,
3978   // (2) not have ruled unreachable code dead, and (3) not have discovered
3979   // any non-trivial dead end.
3980   if (ToBeExploredFrom.empty() &&
3981       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3982       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3983         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3984       }))
3985     return indicatePessimisticFixpoint();
3986   return Change;
3987 }
3988 
3989 /// Liveness information for a call site.
3990 struct AAIsDeadCallSite final : AAIsDeadFunction {
3991   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3992       : AAIsDeadFunction(IRP, A) {}
3993 
3994   /// See AbstractAttribute::initialize(...).
3995   void initialize(Attributor &A) override {
3996     // TODO: Once we have call site specific value information we can provide
3997     //       call site specific liveness information and then it makes
3998     //       sense to specialize attributes for call sites instead of
3999     //       redirecting requests to the callee.
4000     llvm_unreachable("Abstract attributes for liveness are not "
4001                      "supported for call sites yet!");
4002   }
4003 
4004   /// See AbstractAttribute::updateImpl(...).
4005   ChangeStatus updateImpl(Attributor &A) override {
4006     return indicatePessimisticFixpoint();
4007   }
4008 
4009   /// See AbstractAttribute::trackStatistics()
4010   void trackStatistics() const override {}
4011 };
4012 
4013 /// -------------------- Dereferenceable Argument Attribute --------------------
4014 
4015 struct AADereferenceableImpl : AADereferenceable {
4016   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4017       : AADereferenceable(IRP, A) {}
4018   using StateType = DerefState;
4019 
4020   /// See AbstractAttribute::initialize(...).
4021   void initialize(Attributor &A) override {
4022     SmallVector<Attribute, 4> Attrs;
4023     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4024              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4025     for (const Attribute &Attr : Attrs)
4026       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4027 
4028     const IRPosition &IRP = this->getIRPosition();
4029     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4030 
4031     bool CanBeNull, CanBeFreed;
4032     takeKnownDerefBytesMaximum(
4033         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4034             A.getDataLayout(), CanBeNull, CanBeFreed));
4035 
4036     bool IsFnInterface = IRP.isFnInterfaceKind();
4037     Function *FnScope = IRP.getAnchorScope();
4038     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4039       indicatePessimisticFixpoint();
4040       return;
4041     }
4042 
4043     if (Instruction *CtxI = getCtxI())
4044       followUsesInMBEC(*this, A, getState(), *CtxI);
4045   }
4046 
4047   /// See AbstractAttribute::getState()
4048   /// {
4049   StateType &getState() override { return *this; }
4050   const StateType &getState() const override { return *this; }
4051   /// }
4052 
4053   /// Helper function for collecting accessed bytes in must-be-executed-context
4054   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4055                               DerefState &State) {
4056     const Value *UseV = U->get();
4057     if (!UseV->getType()->isPointerTy())
4058       return;
4059 
4060     Type *PtrTy = UseV->getType();
4061     const DataLayout &DL = A.getDataLayout();
4062     int64_t Offset;
4063     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4064             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4065       if (Base == &getAssociatedValue() &&
4066           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4067         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4068         State.addAccessedBytes(Offset, Size);
4069       }
4070     }
4071   }
4072 
4073   /// See followUsesInMBEC
4074   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4075                        AADereferenceable::StateType &State) {
4076     bool IsNonNull = false;
4077     bool TrackUse = false;
4078     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4079         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4080     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4081                       << " for instruction " << *I << "\n");
4082 
4083     addAccessedBytesForUse(A, U, I, State);
4084     State.takeKnownDerefBytesMaximum(DerefBytes);
4085     return TrackUse;
4086   }
4087 
4088   /// See AbstractAttribute::manifest(...).
4089   ChangeStatus manifest(Attributor &A) override {
4090     ChangeStatus Change = AADereferenceable::manifest(A);
4091     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4092       removeAttrs({Attribute::DereferenceableOrNull});
4093       return ChangeStatus::CHANGED;
4094     }
4095     return Change;
4096   }
4097 
4098   void getDeducedAttributes(LLVMContext &Ctx,
4099                             SmallVectorImpl<Attribute> &Attrs) const override {
4100     // TODO: Add *_globally support
4101     if (isAssumedNonNull())
4102       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4103           Ctx, getAssumedDereferenceableBytes()));
4104     else
4105       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4106           Ctx, getAssumedDereferenceableBytes()));
4107   }
4108 
4109   /// See AbstractAttribute::getAsStr().
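  /// E.g., "dereferenceable<4-8>" encodes 4 known and 8 assumed dereferenceable
  /// bytes; "_or_null" and "_globally" are appended as applicable.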
4110   const std::string getAsStr() const override {
4111     if (!getAssumedDereferenceableBytes())
4112       return "unknown-dereferenceable";
4113     return std::string("dereferenceable") +
4114            (isAssumedNonNull() ? "" : "_or_null") +
4115            (isAssumedGlobal() ? "_globally" : "") + "<" +
4116            std::to_string(getKnownDereferenceableBytes()) + "-" +
4117            std::to_string(getAssumedDereferenceableBytes()) + ">";
4118   }
4119 };
4120 
4121 /// Dereferenceable attribute for a floating value.
4122 struct AADereferenceableFloating : AADereferenceableImpl {
4123   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4124       : AADereferenceableImpl(IRP, A) {}
4125 
4126   /// See AbstractAttribute::updateImpl(...).
4127   ChangeStatus updateImpl(Attributor &A) override {
4128     const DataLayout &DL = A.getDataLayout();
4129 
4130     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4131                             bool Stripped) -> bool {
4132       unsigned IdxWidth =
4133           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4134       APInt Offset(IdxWidth, 0);
4135       const Value *Base =
4136           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4137 
4138       const auto &AA = A.getAAFor<AADereferenceable>(
4139           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4140       int64_t DerefBytes = 0;
4141       if (!Stripped && this == &AA) {
4142         // Use IR information if we did not strip anything.
4143         // TODO: track globally.
4144         bool CanBeNull, CanBeFreed;
4145         DerefBytes =
4146             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4147         T.GlobalState.indicatePessimisticFixpoint();
4148       } else {
4149         const DerefState &DS = AA.getState();
4150         DerefBytes = DS.DerefBytesState.getAssumed();
4151         T.GlobalState &= DS.GlobalState;
4152       }
4153 
4154       // For now we do not try to "increase" dereferenceability due to negative
4155       // indices as we first have to come up with code to deal with loops and
4156       // for overflows of the dereferenceable bytes.
4157       int64_t OffsetSExt = Offset.getSExtValue();
4158       if (OffsetSExt < 0)
4159         OffsetSExt = 0;
4160 
4161       T.takeAssumedDerefBytesMinimum(
4162           std::max(int64_t(0), DerefBytes - OffsetSExt));
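      // E.g., if the base is dereferenceable(16) and the accumulated offset is
      // 4, only 16 - 4 = 12 bytes remain dereferenceable at this value.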
4163 
4164       if (this == &AA) {
4165         if (!Stripped) {
4166           // If nothing was stripped IR information is all we got.
4167           T.takeKnownDerefBytesMaximum(
4168               std::max(int64_t(0), DerefBytes - OffsetSExt));
4169           T.indicatePessimisticFixpoint();
4170         } else if (OffsetSExt > 0) {
4171           // If something was stripped but there is circular reasoning, we
4172           // look at the offset. If it is positive we essentially decrease
4173           // the dereferenceable bytes in a circular loop now, which would
4174           // only drive them down to the known value very slowly; indicating
4175           // a fixpoint accelerates that.
4176           T.indicatePessimisticFixpoint();
4177         }
4178       }
4179 
4180       return T.isValidState();
4181     };
4182 
4183     DerefState T;
4184     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4185                                            VisitValueCB, getCtxI()))
4186       return indicatePessimisticFixpoint();
4187 
4188     return clampStateAndIndicateChange(getState(), T);
4189   }
4190 
4191   /// See AbstractAttribute::trackStatistics()
4192   void trackStatistics() const override {
4193     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4194   }
4195 };
4196 
4197 /// Dereferenceable attribute for a return value.
4198 struct AADereferenceableReturned final
4199     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4200   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4201       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4202             IRP, A) {}
4203 
4204   /// See AbstractAttribute::trackStatistics()
4205   void trackStatistics() const override {
4206     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4207   }
4208 };
4209 
4210 /// Dereferenceable attribute for an argument
4211 struct AADereferenceableArgument final
4212     : AAArgumentFromCallSiteArguments<AADereferenceable,
4213                                       AADereferenceableImpl> {
4214   using Base =
4215       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4216   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4217       : Base(IRP, A) {}
4218 
4219   /// See AbstractAttribute::trackStatistics()
4220   void trackStatistics() const override {
4221     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4222   }
4223 };
4224 
4225 /// Dereferenceable attribute for a call site argument.
4226 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4227   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4228       : AADereferenceableFloating(IRP, A) {}
4229 
4230   /// See AbstractAttribute::trackStatistics()
4231   void trackStatistics() const override {
4232     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4233   }
4234 };
4235 
4236 /// Dereferenceable attribute deduction for a call site return value.
4237 struct AADereferenceableCallSiteReturned final
4238     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4239   using Base =
4240       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4241   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4242       : Base(IRP, A) {}
4243 
4244   /// See AbstractAttribute::trackStatistics()
4245   void trackStatistics() const override {
4246     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4247   }
4248 };
4249 
4250 // ------------------------ Align Argument Attribute ------------------------
4251 
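/// Determine the alignment of \p AssociatedValue that is implied by its use
/// \p U in instruction \p I. \p TrackUse is set if the uses of \p I should be
/// explored as well; a return value of 0 means no (new) alignment information.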
4252 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4253                                     Value &AssociatedValue, const Use *U,
4254                                     const Instruction *I, bool &TrackUse) {
4255   // We need to follow common pointer manipulation uses to the accesses they
4256   // feed into.
4257   if (isa<CastInst>(I)) {
4258     // Follow all but ptr2int casts.
4259     TrackUse = !isa<PtrToIntInst>(I);
4260     return 0;
4261   }
4262   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4263     if (GEP->hasAllConstantIndices())
4264       TrackUse = true;
4265     return 0;
4266   }
4267 
4268   MaybeAlign MA;
4269   if (const auto *CB = dyn_cast<CallBase>(I)) {
4270     if (CB->isBundleOperand(U) || CB->isCallee(U))
4271       return 0;
4272 
4273     unsigned ArgNo = CB->getArgOperandNo(U);
4274     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4275     // As long as we only use known information there is no need to track
4276     // dependences here.
4277     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4278     MA = MaybeAlign(AlignAA.getKnownAlign());
4279   }
4280 
4281   const DataLayout &DL = A.getDataLayout();
4282   const Value *UseV = U->get();
4283   if (auto *SI = dyn_cast<StoreInst>(I)) {
4284     if (SI->getPointerOperand() == UseV)
4285       MA = SI->getAlign();
4286   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4287     if (LI->getPointerOperand() == UseV)
4288       MA = LI->getAlign();
4289   }
4290 
4291   if (!MA || *MA <= QueryingAA.getKnownAlign())
4292     return 0;
4293 
4294   unsigned Alignment = MA->value();
4295   int64_t Offset;
4296 
4297   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4298     if (Base == &AssociatedValue) {
4299       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4300       // So we can say that the maximum power of two which is a divisor of
4301       // gcd(Offset, Alignment) is an alignment.
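      // E.g., an access known to be 16-byte aligned at offset 12 from the base
      // implies gcd(12, 16) = 4, i.e., the base is at least 4-byte aligned.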
4302 
4303       uint32_t gcd =
4304           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4305       Alignment = llvm::PowerOf2Floor(gcd);
4306     }
4307   }
4308 
4309   return Alignment;
4310 }
4311 
4312 struct AAAlignImpl : AAAlign {
4313   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4314 
4315   /// See AbstractAttribute::initialize(...).
4316   void initialize(Attributor &A) override {
4317     SmallVector<Attribute, 4> Attrs;
4318     getAttrs({Attribute::Alignment}, Attrs);
4319     for (const Attribute &Attr : Attrs)
4320       takeKnownMaximum(Attr.getValueAsInt());
4321 
4322     Value &V = getAssociatedValue();
4323     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4324     //       use of the function pointer. This was caused by D73131. We want to
4325     //       avoid this for function pointers especially because we iterate
4326     //       their uses and int2ptr is not handled. It is not a correctness
4327     //       problem though!
4328     if (!V.getType()->getPointerElementType()->isFunctionTy())
4329       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4330 
4331     if (getIRPosition().isFnInterfaceKind() &&
4332         (!getAnchorScope() ||
4333          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4334       indicatePessimisticFixpoint();
4335       return;
4336     }
4337 
4338     if (Instruction *CtxI = getCtxI())
4339       followUsesInMBEC(*this, A, getState(), *CtxI);
4340   }
4341 
4342   /// See AbstractAttribute::manifest(...).
4343   ChangeStatus manifest(Attributor &A) override {
4344     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4345 
4346     // Check for users that allow alignment annotations.
4347     Value &AssociatedValue = getAssociatedValue();
4348     for (const Use &U : AssociatedValue.uses()) {
4349       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4350         if (SI->getPointerOperand() == &AssociatedValue)
4351           if (SI->getAlignment() < getAssumedAlign()) {
4352             STATS_DECLTRACK(AAAlign, Store,
4353                             "Number of times alignment added to a store");
4354             SI->setAlignment(Align(getAssumedAlign()));
4355             LoadStoreChanged = ChangeStatus::CHANGED;
4356           }
4357       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4358         if (LI->getPointerOperand() == &AssociatedValue)
4359           if (LI->getAlignment() < getAssumedAlign()) {
4360             LI->setAlignment(Align(getAssumedAlign()));
4361             STATS_DECLTRACK(AAAlign, Load,
4362                             "Number of times alignment added to a load");
4363             LoadStoreChanged = ChangeStatus::CHANGED;
4364           }
4365       }
4366     }
4367 
4368     ChangeStatus Changed = AAAlign::manifest(A);
4369 
4370     Align InheritAlign =
4371         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4372     if (InheritAlign >= getAssumedAlign())
4373       return LoadStoreChanged;
4374     return Changed | LoadStoreChanged;
4375   }
4376 
4377   // TODO: Provide a helper to determine the implied ABI alignment and check
4378   //       that value in the existing manifest method and a new one for
4379   //       AAAlignImpl, to avoid making the alignment explicit if it did not improve.
4380 
4381   /// See AbstractAttribute::getDeducedAttributes
4382   virtual void
4383   getDeducedAttributes(LLVMContext &Ctx,
4384                        SmallVectorImpl<Attribute> &Attrs) const override {
4385     if (getAssumedAlign() > 1)
4386       Attrs.emplace_back(
4387           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4388   }
4389 
4390   /// See followUsesInMBEC
4391   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4392                        AAAlign::StateType &State) {
4393     bool TrackUse = false;
4394 
4395     unsigned int KnownAlign =
4396         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4397     State.takeKnownMaximum(KnownAlign);
4398 
4399     return TrackUse;
4400   }
4401 
4402   /// See AbstractAttribute::getAsStr().
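  /// E.g., "align<4-16>" encodes a known alignment of 4 and an assumed
  /// alignment of 16 bytes.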
4403   const std::string getAsStr() const override {
4404     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4405                                 "-" + std::to_string(getAssumedAlign()) + ">")
4406                              : "unknown-align";
4407   }
4408 };
4409 
4410 /// Align attribute for a floating value.
4411 struct AAAlignFloating : AAAlignImpl {
4412   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4413 
4414   /// See AbstractAttribute::updateImpl(...).
4415   ChangeStatus updateImpl(Attributor &A) override {
4416     const DataLayout &DL = A.getDataLayout();
4417 
4418     auto VisitValueCB = [&](Value &V, const Instruction *,
4419                             AAAlign::StateType &T, bool Stripped) -> bool {
4420       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4421                                            DepClassTy::REQUIRED);
4422       if (!Stripped && this == &AA) {
4423         int64_t Offset;
4424         unsigned Alignment = 1;
4425         if (const Value *Base =
4426                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4427           Align PA = Base->getPointerAlignment(DL);
4428           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4429           // So we can say that the maximum power of two which is a divisor of
4430           // gcd(Offset, Alignment) is an alignment.
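          // E.g., an 8-byte aligned base accessed at offset 20 yields
          // gcd(20, 8) = 4, i.e., a 4-byte alignment for this value.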
4431 
4432           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4433                                                uint32_t(PA.value()));
4434           Alignment = llvm::PowerOf2Floor(gcd);
4435         } else {
4436           Alignment = V.getPointerAlignment(DL).value();
4437         }
4438         // Use only IR information if we did not strip anything.
4439         T.takeKnownMaximum(Alignment);
4440         T.indicatePessimisticFixpoint();
4441       } else {
4442         // Use abstract attribute information.
4443         const AAAlign::StateType &DS = AA.getState();
4444         T ^= DS;
4445       }
4446       return T.isValidState();
4447     };
4448 
4449     StateType T;
4450     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4451                                           VisitValueCB, getCtxI()))
4452       return indicatePessimisticFixpoint();
4453 
4454     // TODO: If we know we visited all incoming values, and thus none are
4455     // assumed dead, we can take the known information from the state T.
4456     return clampStateAndIndicateChange(getState(), T);
4457   }
4458 
4459   /// See AbstractAttribute::trackStatistics()
4460   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4461 };
4462 
4463 /// Align attribute for function return value.
4464 struct AAAlignReturned final
4465     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4466   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4467   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4468 
4469   /// See AbstractAttribute::initialize(...).
4470   void initialize(Attributor &A) override {
4471     Base::initialize(A);
4472     Function *F = getAssociatedFunction();
4473     if (!F || F->isDeclaration())
4474       indicatePessimisticFixpoint();
4475   }
4476 
4477   /// See AbstractAttribute::trackStatistics()
4478   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4479 };
4480 
4481 /// Align attribute for function argument.
4482 struct AAAlignArgument final
4483     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4484   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4485   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4486 
4487   /// See AbstractAttribute::manifest(...).
4488   ChangeStatus manifest(Attributor &A) override {
4489     // If the associated argument is involved in a must-tail call we give up
4490     // because we would need to keep the argument alignments of caller and
4491     // callee in-sync. Just does not seem worth the trouble right now.
4492     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4493       return ChangeStatus::UNCHANGED;
4494     return Base::manifest(A);
4495   }
4496 
4497   /// See AbstractAttribute::trackStatistics()
4498   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4499 };
4500 
4501 struct AAAlignCallSiteArgument final : AAAlignFloating {
4502   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4503       : AAAlignFloating(IRP, A) {}
4504 
4505   /// See AbstractAttribute::manifest(...).
4506   ChangeStatus manifest(Attributor &A) override {
4507     // If the associated argument is involved in a must-tail call we give up
4508     // because we would need to keep the argument alignments of caller and
4509     // callee in-sync. Just does not seem worth the trouble right now.
4510     if (Argument *Arg = getAssociatedArgument())
4511       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4512         return ChangeStatus::UNCHANGED;
4513     ChangeStatus Changed = AAAlignImpl::manifest(A);
4514     Align InheritAlign =
4515         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4516     if (InheritAlign >= getAssumedAlign())
4517       Changed = ChangeStatus::UNCHANGED;
4518     return Changed;
4519   }
4520 
4521   /// See AbstractAttribute::updateImpl(Attributor &A).
4522   ChangeStatus updateImpl(Attributor &A) override {
4523     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4524     if (Argument *Arg = getAssociatedArgument()) {
4525       // We only take known information from the argument
4526       // so we do not need to track a dependence.
4527       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4528           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4529       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4530     }
4531     return Changed;
4532   }
4533 
4534   /// See AbstractAttribute::trackStatistics()
4535   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4536 };
4537 
4538 /// Align attribute deduction for a call site return value.
4539 struct AAAlignCallSiteReturned final
4540     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4541   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4542   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4543       : Base(IRP, A) {}
4544 
4545   /// See AbstractAttribute::initialize(...).
4546   void initialize(Attributor &A) override {
4547     Base::initialize(A);
4548     Function *F = getAssociatedFunction();
4549     if (!F || F->isDeclaration())
4550       indicatePessimisticFixpoint();
4551   }
4552 
4553   /// See AbstractAttribute::trackStatistics()
4554   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4555 };
4556 
4557 /// ------------------ Function No-Return Attribute ----------------------------
4558 struct AANoReturnImpl : public AANoReturn {
4559   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4560 
4561   /// See AbstractAttribute::initialize(...).
4562   void initialize(Attributor &A) override {
4563     AANoReturn::initialize(A);
4564     Function *F = getAssociatedFunction();
4565     if (!F || F->isDeclaration())
4566       indicatePessimisticFixpoint();
4567   }
4568 
4569   /// See AbstractAttribute::getAsStr().
4570   const std::string getAsStr() const override {
4571     return getAssumed() ? "noreturn" : "may-return";
4572   }
4573 
4574   /// See AbstractAttribute::updateImpl(Attributor &A).
4575   virtual ChangeStatus updateImpl(Attributor &A) override {
4576     auto CheckForNoReturn = [](Instruction &) { return false; };
4577     bool UsedAssumedInformation = false;
4578     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4579                                    {(unsigned)Instruction::Ret},
4580                                    UsedAssumedInformation))
4581       return indicatePessimisticFixpoint();
4582     return ChangeStatus::UNCHANGED;
4583   }
4584 };
4585 
4586 struct AANoReturnFunction final : AANoReturnImpl {
4587   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4588       : AANoReturnImpl(IRP, A) {}
4589 
4590   /// See AbstractAttribute::trackStatistics()
4591   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4592 };
4593 
4594 /// NoReturn attribute deduction for a call site.
4595 struct AANoReturnCallSite final : AANoReturnImpl {
4596   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4597       : AANoReturnImpl(IRP, A) {}
4598 
4599   /// See AbstractAttribute::initialize(...).
4600   void initialize(Attributor &A) override {
4601     AANoReturnImpl::initialize(A);
4602     if (Function *F = getAssociatedFunction()) {
4603       const IRPosition &FnPos = IRPosition::function(*F);
4604       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4605       if (!FnAA.isAssumedNoReturn())
4606         indicatePessimisticFixpoint();
4607     }
4608   }
4609 
4610   /// See AbstractAttribute::updateImpl(...).
4611   ChangeStatus updateImpl(Attributor &A) override {
4612     // TODO: Once we have call site specific value information we can provide
4613     //       call site specific liveness information and then it makes
4614     //       sense to specialize attributes for call sites instead of
4615     //       redirecting requests to the callee.
4616     Function *F = getAssociatedFunction();
4617     const IRPosition &FnPos = IRPosition::function(*F);
4618     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4619     return clampStateAndIndicateChange(getState(), FnAA.getState());
4620   }
4621 
4622   /// See AbstractAttribute::trackStatistics()
4623   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4624 };
4625 
4626 /// ----------------------- Variable Capturing ---------------------------------
4627 
4628 /// A class to hold the state for no-capture attributes.
4629 struct AANoCaptureImpl : public AANoCapture {
4630   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4631 
4632   /// See AbstractAttribute::initialize(...).
4633   void initialize(Attributor &A) override {
4634     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4635       indicateOptimisticFixpoint();
4636       return;
4637     }
4638     Function *AnchorScope = getAnchorScope();
4639     if (isFnInterfaceKind() &&
4640         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4641       indicatePessimisticFixpoint();
4642       return;
4643     }
4644 
4645     // You cannot "capture" null in the default address space.
4646     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4647         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4648       indicateOptimisticFixpoint();
4649       return;
4650     }
4651 
4652     const Function *F =
4653         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4654 
4655     // Check what state the associated function can actually capture.
4656     if (F)
4657       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4658     else
4659       indicatePessimisticFixpoint();
4660   }
4661 
4662   /// See AbstractAttribute::updateImpl(...).
4663   ChangeStatus updateImpl(Attributor &A) override;
4664 
4665   /// See AbstractAttribute::getDeducedAttributes(...).
4666   virtual void
4667   getDeducedAttributes(LLVMContext &Ctx,
4668                        SmallVectorImpl<Attribute> &Attrs) const override {
4669     if (!isAssumedNoCaptureMaybeReturned())
4670       return;
4671 
4672     if (isArgumentPosition()) {
4673       if (isAssumedNoCapture())
4674         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4675       else if (ManifestInternal)
4676         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4677     }
4678   }
4679 
4680   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4681   /// depending on the ability of the function associated with \p IRP to capture
4682   /// state in memory and through "returning/throwing", respectively.
4683   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4684                                                    const Function &F,
4685                                                    BitIntegerState &State) {
4686     // TODO: Once we have memory behavior attributes we should use them here.
4687 
4688     // If we know we cannot communicate or write to memory, we do not care about
4689     // ptr2int anymore.
4690     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4691         F.getReturnType()->isVoidTy()) {
4692       State.addKnownBits(NO_CAPTURE);
4693       return;
4694     }
4695 
4696     // A function cannot capture state in memory if it only reads memory. It
4697     // can, however, return/throw state, and that state might be influenced by
4698     // the pointer value, e.g., loading from a returned pointer might reveal a bit.
4699     if (F.onlyReadsMemory())
4700       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4701 
4702     // A function cannot communicate state back if it does not throw
4703     // exceptions and does not return values.
4704     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4705       State.addKnownBits(NOT_CAPTURED_IN_RET);
4706 
4707     // Check existing "returned" attributes.
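    // If some argument is known to be the returned value and the function does
    // not throw, the return cannot expose any *other* argument; only for the
    // argument that is actually returned do we drop the NOT_CAPTURED_IN_RET
    // assumption.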
4708     int ArgNo = IRP.getCalleeArgNo();
4709     if (F.doesNotThrow() && ArgNo >= 0) {
4710       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4711         if (F.hasParamAttribute(u, Attribute::Returned)) {
4712           if (u == unsigned(ArgNo))
4713             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4714           else if (F.onlyReadsMemory())
4715             State.addKnownBits(NO_CAPTURE);
4716           else
4717             State.addKnownBits(NOT_CAPTURED_IN_RET);
4718           break;
4719         }
4720     }
4721   }
4722 
4723   /// See AbstractState::getAsStr().
4724   const std::string getAsStr() const override {
4725     if (isKnownNoCapture())
4726       return "known not-captured";
4727     if (isAssumedNoCapture())
4728       return "assumed not-captured";
4729     if (isKnownNoCaptureMaybeReturned())
4730       return "known not-captured-maybe-returned";
4731     if (isAssumedNoCaptureMaybeReturned())
4732       return "assumed not-captured-maybe-returned";
4733     return "assumed-captured";
4734   }
4735 };
4736 
4737 /// Attributor-aware capture tracker.
4738 struct AACaptureUseTracker final : public CaptureTracker {
4739 
4740   /// Create a capture tracker that can look up in-flight abstract attributes
4741   /// through the Attributor \p A.
4742   ///
4743   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4744   /// search is stopped. If a use leads to a return instruction,
4745   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4746   /// If a use leads to a ptr2int which may capture the value,
4747   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4748   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4749   /// set. All values in \p PotentialCopies are later tracked as well. For every
4750   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4751   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4752   /// conservatively set to true.
4753   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4754                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4755                       SmallSetVector<Value *, 4> &PotentialCopies,
4756                       unsigned &RemainingUsesToExplore)
4757       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4758         PotentialCopies(PotentialCopies),
4759         RemainingUsesToExplore(RemainingUsesToExplore) {}
4760 
4761   /// Determine if \p V may be captured. *Also updates the state!*
4762   bool valueMayBeCaptured(const Value *V) {
4763     if (V->getType()->isPointerTy()) {
4764       PointerMayBeCaptured(V, this);
4765     } else {
4766       State.indicatePessimisticFixpoint();
4767     }
4768     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4769   }
4770 
4771   /// See CaptureTracker::tooManyUses().
4772   void tooManyUses() override {
4773     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4774   }
4775 
4776   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4777     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4778       return true;
4779     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4780         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4781     return DerefAA.getAssumedDereferenceableBytes();
4782   }
4783 
4784   /// See CaptureTracker::captured(...).
4785   bool captured(const Use *U) override {
4786     Instruction *UInst = cast<Instruction>(U->getUser());
4787     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4788                       << "\n");
4789 
4790     // Because we may reuse the tracker multiple times we keep track of the
4791     // number of explored uses ourselves as well.
4792     if (RemainingUsesToExplore-- == 0) {
4793       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4794       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4795                           /* Return */ true);
4796     }
4797 
4798     // Deal with ptr2int by following uses.
4799     if (isa<PtrToIntInst>(UInst)) {
4800       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4801       return valueMayBeCaptured(UInst);
4802     }
4803 
4804     // For stores we check if we can follow the value through memory or not.
4805     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4806       if (SI->isVolatile())
4807         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4808                             /* Return */ false);
4809       bool UsedAssumedInformation = false;
4810       if (!AA::getPotentialCopiesOfStoredValue(
4811               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4812         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4813                             /* Return */ false);
4814       // Not captured directly, potential copies will be checked.
4815       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4816                           /* Return */ false);
4817     }
4818 
4819     // Explicitly catch return instructions.
4820     if (isa<ReturnInst>(UInst)) {
4821       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4822         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4823                             /* Return */ true);
4824       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4825                           /* Return */ true);
4826     }
4827 
4828     // For now we only use special logic for call sites. However, the tracker
4829     // itself knows about a lot of other non-capturing cases already.
4830     auto *CB = dyn_cast<CallBase>(UInst);
4831     if (!CB || !CB->isArgOperand(U))
4832       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4833                           /* Return */ true);
4834 
4835     unsigned ArgNo = CB->getArgOperandNo(U);
4836     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4837     // If we have an abstract no-capture attribute for the argument we can use
4838     // it to justify a non-capture attribute here. This allows recursion!
4839     auto &ArgNoCaptureAA =
4840         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4841     if (ArgNoCaptureAA.isAssumedNoCapture())
4842       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4843                           /* Return */ false);
4844     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4845       addPotentialCopy(*CB);
4846       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4847                           /* Return */ false);
4848     }
4849 
4850     // Lastly, we could not find a reason no-capture can be assumed, so we don't.
4851     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4852                         /* Return */ true);
4853   }
4854 
4855   /// Register \p CS as potential copy of the value we are checking.
4856   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4857 
4858   /// See CaptureTracker::shouldExplore(...).
4859   bool shouldExplore(const Use *U) override {
4860     // Check liveness and ignore droppable users.
4861     bool UsedAssumedInformation = false;
4862     return !U->getUser()->isDroppable() &&
4863            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4864                             UsedAssumedInformation);
4865   }
4866 
4867   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4868   /// \p CapturedInRet, then return the appropriate value for use in the
4869   /// CaptureTracker::captured() interface.
4870   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4871                     bool CapturedInRet) {
4872     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4873                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4874     if (CapturedInMem)
4875       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4876     if (CapturedInInt)
4877       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4878     if (CapturedInRet)
4879       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4880     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4881   }
4882 
4883 private:
4884   /// The attributor providing in-flight abstract attributes.
4885   Attributor &A;
4886 
4887   /// The abstract attribute currently updated.
4888   AANoCapture &NoCaptureAA;
4889 
4890   /// The abstract liveness state.
4891   const AAIsDead &IsDeadAA;
4892 
4893   /// The state currently updated.
4894   AANoCapture::StateType &State;
4895 
4896   /// Set of potential copies of the tracked value.
4897   SmallSetVector<Value *, 4> &PotentialCopies;
4898 
4899   /// Global counter to limit the number of explored uses.
4900   unsigned &RemainingUsesToExplore;
4901 };
4902 
4903 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4904   const IRPosition &IRP = getIRPosition();
4905   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4906                                   : &IRP.getAssociatedValue();
4907   if (!V)
4908     return indicatePessimisticFixpoint();
4909 
4910   const Function *F =
4911       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4912   assert(F && "Expected a function!");
4913   const IRPosition &FnPos = IRPosition::function(*F);
4914   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4915 
4916   AANoCapture::StateType T;
4917 
4918   // Readonly means we cannot capture through memory.
4919   const auto &FnMemAA =
4920       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4921   if (FnMemAA.isAssumedReadOnly()) {
4922     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4923     if (FnMemAA.isKnownReadOnly())
4924       addKnownBits(NOT_CAPTURED_IN_MEM);
4925     else
4926       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4927   }
4928 
4929   // Make sure all returned values are different from the underlying value.
4930   // TODO: we could do this in a more sophisticated way inside
4931   //       AAReturnedValues, e.g., track all values that escape through returns
4932   //       directly somehow.
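  // The predicate below accepts at most one constant return value and
  // otherwise only arguments different from the associated one.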
4933   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4934     bool SeenConstant = false;
4935     for (auto &It : RVAA.returned_values()) {
4936       if (isa<Constant>(It.first)) {
4937         if (SeenConstant)
4938           return false;
4939         SeenConstant = true;
4940       } else if (!isa<Argument>(It.first) ||
4941                  It.first == getAssociatedArgument())
4942         return false;
4943     }
4944     return true;
4945   };
4946 
4947   const auto &NoUnwindAA =
4948       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4949   if (NoUnwindAA.isAssumedNoUnwind()) {
4950     bool IsVoidTy = F->getReturnType()->isVoidTy();
4951     const AAReturnedValues *RVAA =
4952         IsVoidTy ? nullptr
4953                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4954                                                  DepClassTy::OPTIONAL);
4956     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4957       T.addKnownBits(NOT_CAPTURED_IN_RET);
4958       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4959         return ChangeStatus::UNCHANGED;
4960       if (NoUnwindAA.isKnownNoUnwind() &&
4961           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4962         addKnownBits(NOT_CAPTURED_IN_RET);
4963         if (isKnown(NOT_CAPTURED_IN_MEM))
4964           return indicateOptimisticFixpoint();
4965       }
4966     }
4967   }
4968 
4969   // Use the CaptureTracker interface and logic with the specialized tracker,
4970   // defined in AACaptureUseTracker, that can look at in-flight abstract
4971   // attributes and directly updates the assumed state.
4972   SmallSetVector<Value *, 4> PotentialCopies;
4973   unsigned RemainingUsesToExplore =
4974       getDefaultMaxUsesToExploreForCaptureTracking();
4975   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4976                               RemainingUsesToExplore);
4977 
4978   // Check all potential copies of the associated value until we can assume
4979   // none will be captured or we have to assume at least one might be.
4980   unsigned Idx = 0;
4981   PotentialCopies.insert(V);
4982   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4983     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4984 
4985   AANoCapture::StateType &S = getState();
4986   auto Assumed = S.getAssumed();
4987   S.intersectAssumedBits(T.getAssumed());
4988   if (!isAssumedNoCaptureMaybeReturned())
4989     return indicatePessimisticFixpoint();
4990   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4991                                    : ChangeStatus::CHANGED;
4992 }
4993 
4994 /// NoCapture attribute for function arguments.
4995 struct AANoCaptureArgument final : AANoCaptureImpl {
4996   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4997       : AANoCaptureImpl(IRP, A) {}
4998 
4999   /// See AbstractAttribute::trackStatistics()
5000   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5001 };
5002 
5003 /// NoCapture attribute for call site arguments.
5004 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5005   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5006       : AANoCaptureImpl(IRP, A) {}
5007 
5008   /// See AbstractAttribute::initialize(...).
5009   void initialize(Attributor &A) override {
5010     if (Argument *Arg = getAssociatedArgument())
5011       if (Arg->hasByValAttr())
5012         indicateOptimisticFixpoint();
5013     AANoCaptureImpl::initialize(A);
5014   }
5015 
5016   /// See AbstractAttribute::updateImpl(...).
5017   ChangeStatus updateImpl(Attributor &A) override {
5018     // TODO: Once we have call site specific value information we can provide
5019     //       call site specific liveness information and then it makes
5020     //       sense to specialize attributes for call site arguments instead of
5021     //       redirecting requests to the callee argument.
5022     Argument *Arg = getAssociatedArgument();
5023     if (!Arg)
5024       return indicatePessimisticFixpoint();
5025     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5026     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5027     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5028   }
5029 
5030   /// See AbstractAttribute::trackStatistics()
5031   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5032 };
5033 
5034 /// NoCapture attribute for floating values.
5035 struct AANoCaptureFloating final : AANoCaptureImpl {
5036   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5037       : AANoCaptureImpl(IRP, A) {}
5038 
5039   /// See AbstractAttribute::trackStatistics()
5040   void trackStatistics() const override {
5041     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5042   }
5043 };
5044 
5045 /// NoCapture attribute for function return value.
5046 struct AANoCaptureReturned final : AANoCaptureImpl {
5047   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5048       : AANoCaptureImpl(IRP, A) {
5049     llvm_unreachable("NoCapture is not applicable to function returns!");
5050   }
5051 
5052   /// See AbstractAttribute::initialize(...).
5053   void initialize(Attributor &A) override {
5054     llvm_unreachable("NoCapture is not applicable to function returns!");
5055   }
5056 
5057   /// See AbstractAttribute::updateImpl(...).
5058   ChangeStatus updateImpl(Attributor &A) override {
5059     llvm_unreachable("NoCapture is not applicable to function returns!");
5060   }
5061 
5062   /// See AbstractAttribute::trackStatistics()
5063   void trackStatistics() const override {}
5064 };
5065 
5066 /// NoCapture attribute deduction for a call site return value.
5067 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5068   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5069       : AANoCaptureImpl(IRP, A) {}
5070 
5071   /// See AbstractAttribute::initialize(...).
5072   void initialize(Attributor &A) override {
5073     const Function *F = getAnchorScope();
5074     // Check what state the associated function can actually capture.
5075     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5076   }
5077 
5078   /// See AbstractAttribute::trackStatistics()
5079   void trackStatistics() const override {
5080     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5081   }
5082 };
5083 } // namespace
5084 
5085 /// ------------------ Value Simplify Attribute ----------------------------
5086 
5087 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5088   // FIXME: Add typecast support.
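  // Lattice sketch (assumed semantics of combineOptionalValuesInAAValueLatice):
  // an unset Optional means "no value assumed yet", a concrete Value * means
  // "simplified to that value", and nullptr means the inputs conflicted and
  // simplification failed; the latter is what the check below reports.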
5089   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5090       SimplifiedAssociatedValue, Other, Ty);
5091   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5092     return false;
5093 
5094   LLVM_DEBUG({
5095     if (SimplifiedAssociatedValue.hasValue())
5096       dbgs() << "[ValueSimplify] is assumed to be "
5097              << **SimplifiedAssociatedValue << "\n";
5098     else
5099       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5100   });
5101   return true;
5102 }
5103 
5104 namespace {
5105 struct AAValueSimplifyImpl : AAValueSimplify {
5106   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5107       : AAValueSimplify(IRP, A) {}
5108 
5109   /// See AbstractAttribute::initialize(...).
5110   void initialize(Attributor &A) override {
5111     if (getAssociatedValue().getType()->isVoidTy())
5112       indicatePessimisticFixpoint();
5113     if (A.hasSimplificationCallback(getIRPosition()))
5114       indicatePessimisticFixpoint();
5115   }
5116 
5117   /// See AbstractAttribute::getAsStr().
5118   const std::string getAsStr() const override {
5119     LLVM_DEBUG({
5120       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5121       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5122         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5123     });
5124     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5125                           : "not-simple";
5126   }
5127 
5128   /// See AbstractAttribute::trackStatistics()
5129   void trackStatistics() const override {}
5130 
5131   /// See AAValueSimplify::getAssumedSimplifiedValue()
5132   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5133     return SimplifiedAssociatedValue;
5134   }
5135 
5136   /// Return a value we can use as replacement for the associated one, or
5137   /// nullptr if we don't have one that makes sense.
5138   Value *getReplacementValue(Attributor &A) const {
5139     Value *NewV;
5140     NewV = SimplifiedAssociatedValue.hasValue()
5141                ? SimplifiedAssociatedValue.getValue()
5142                : UndefValue::get(getAssociatedType());
5143     if (!NewV)
5144       return nullptr;
5145     NewV = AA::getWithType(*NewV, *getAssociatedType());
5146     if (!NewV || NewV == &getAssociatedValue())
5147       return nullptr;
5148     const Instruction *CtxI = getCtxI();
5149     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5150       return nullptr;
5151     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5152       return nullptr;
5153     return NewV;
5154   }
5155 
5156   /// Helper function for querying AAValueSimplify and updating the candidate.
5157   /// \param IRP The value position we are trying to unify with SimplifiedValue.
5158   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5159                       const IRPosition &IRP, bool Simplify = true) {
5160     bool UsedAssumedInformation = false;
5161     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5162     if (Simplify)
5163       QueryingValueSimplified =
5164           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5165     return unionAssumed(QueryingValueSimplified);
5166   }
5167 
5168   /// Return true if a candidate was found, false otherwise.
5169   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5170     if (!getAssociatedValue().getType()->isIntegerTy())
5171       return false;
5172 
5173     // This will also pass the call base context.
5174     const auto &AA =
5175         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5176 
5177     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5178 
5179     if (!COpt.hasValue()) {
5180       SimplifiedAssociatedValue = llvm::None;
5181       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5182       return true;
5183     }
5184     if (auto *C = COpt.getValue()) {
5185       SimplifiedAssociatedValue = C;
5186       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5187       return true;
5188     }
5189     return false;
5190   }
5191 
5192   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5193     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5194       return true;
5195     if (askSimplifiedValueFor<AAPotentialValues>(A))
5196       return true;
5197     return false;
5198   }
5199 
5200   /// See AbstractAttribute::manifest(...).
5201   ChangeStatus manifest(Attributor &A) override {
5202     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5203     if (getAssociatedValue().user_empty())
5204       return Changed;
5205 
5206     if (auto *NewV = getReplacementValue(A)) {
5207       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5208                         << *NewV << " :: " << *this << "\n");
5209       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5210         Changed = ChangeStatus::CHANGED;
5211     }
5212 
5213     return Changed | AAValueSimplify::manifest(A);
5214   }
5215 
5216   /// See AbstractState::indicatePessimisticFixpoint(...).
5217   ChangeStatus indicatePessimisticFixpoint() override {
5218     SimplifiedAssociatedValue = &getAssociatedValue();
5219     return AAValueSimplify::indicatePessimisticFixpoint();
5220   }
5221 
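  /// Try to simplify the value loaded by \p L: visit all underlying objects of
  /// its pointer operand and, for each object amenable to load-store
  /// propagation (allocas and global variables), feed the object's initial
  /// value and every value written by an interfering access into \p Union.
  /// Returns false if the loaded value cannot be constrained this way.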
5222   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5223                          LoadInst &L, function_ref<bool(Value &)> Union) {
5224     auto UnionWrapper = [&](Value &V, Value &Obj) {
5225       if (isa<AllocaInst>(Obj))
5226         return Union(V);
5227       if (!AA::isDynamicallyUnique(A, AA, V))
5228         return false;
5229       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5230         return false;
5231       return Union(V);
5232     };
5233 
5234     Value &Ptr = *L.getPointerOperand();
5235     SmallVector<Value *, 8> Objects;
5236     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5237       return false;
5238 
5239     for (Value *Obj : Objects) {
5240       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5241       if (isa<UndefValue>(Obj))
5242         continue;
5243       if (isa<ConstantPointerNull>(Obj)) {
5244         // A null pointer access can be undefined but any offset from null may
5245         // be OK. We do not try to optimize the latter.
5246         bool UsedAssumedInformation = false;
5247         if (!NullPointerIsDefined(L.getFunction(),
5248                                   Ptr.getType()->getPointerAddressSpace()) &&
5249             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5250           continue;
5251         return false;
5252       }
5253       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5254         return false;
5255       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5256       if (!InitialVal || !Union(*InitialVal))
5257         return false;
5258 
5259       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5260                            "propagation, checking accesses next.\n");
5261 
5262       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5263         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5264         if (!Acc.isWrite())
5265           return true;
5266         if (Acc.isWrittenValueYetUndetermined())
5267           return true;
5268         Value *Content = Acc.getWrittenValue();
5269         if (!Content)
5270           return false;
5271         Value *CastedContent =
5272             AA::getWithType(*Content, *AA.getAssociatedType());
5273         if (!CastedContent)
5274           return false;
5275         if (IsExact)
5276           return UnionWrapper(*CastedContent, *Obj);
5277         if (auto *C = dyn_cast<Constant>(CastedContent))
5278           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5279             return UnionWrapper(*CastedContent, *Obj);
5280         return false;
5281       };
5282 
5283       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5284                                            DepClassTy::REQUIRED);
5285       if (!PI.forallInterferingAccesses(L, CheckAccess))
5286         return false;
5287     }
5288     return true;
5289   }
5290 };
5291 
5292 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5293   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5294       : AAValueSimplifyImpl(IRP, A) {}
5295 
5296   void initialize(Attributor &A) override {
5297     AAValueSimplifyImpl::initialize(A);
5298     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5299       indicatePessimisticFixpoint();
5300     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5301                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5302                 /* IgnoreSubsumingPositions */ true))
5303       indicatePessimisticFixpoint();
5304 
5305     // FIXME: This is a hack to prevent us from propagating function pointers in
5306     // the new pass manager CGSCC pass as it creates call edges the
5307     // CallGraphUpdater cannot handle yet.
5308     Value &V = getAssociatedValue();
5309     if (V.getType()->isPointerTy() &&
5310         V.getType()->getPointerElementType()->isFunctionTy() &&
5311         !A.isModulePass())
5312       indicatePessimisticFixpoint();
5313   }
5314 
5315   /// See AbstractAttribute::updateImpl(...).
5316   ChangeStatus updateImpl(Attributor &A) override {
5317     // Byval is only replaceable if it is readonly; otherwise we would write
5318     // into the replaced value and not the copy that byval creates implicitly.
5319     Argument *Arg = getAssociatedArgument();
5320     if (Arg->hasByValAttr()) {
5321       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5322       //       there is no race by not copying a constant byval.
5323       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5324                                                        DepClassTy::REQUIRED);
5325       if (!MemAA.isAssumedReadOnly())
5326         return indicatePessimisticFixpoint();
5327     }
5328 
5329     auto Before = SimplifiedAssociatedValue;
5330 
5331     auto PredForCallSite = [&](AbstractCallSite ACS) {
5332       const IRPosition &ACSArgPos =
5333           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5334       // Check if a corresponding argument was found or if it is one not
5335       // associated (which can happen for callback calls).
5336       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5337         return false;
5338 
5339       // Simplify the argument operand explicitly and check if the result is
5340       // valid in the current scope. This avoids referring to simplified values
5341       // in other functions, e.g., we don't want to say an argument in a
5342       // static function is actually an argument in a different function.
5343       bool UsedAssumedInformation = false;
5344       Optional<Constant *> SimpleArgOp =
5345           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5346       if (!SimpleArgOp.hasValue())
5347         return true;
5348       if (!SimpleArgOp.getValue())
5349         return false;
5350       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5351         return false;
5352       return unionAssumed(*SimpleArgOp);
5353     };
5354 
5355     // Generate an answer specific to the call site context.
5356     bool Success;
5357     bool AllCallSitesKnown;
5358     if (hasCallBaseContext() &&
5359         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5360       Success = PredForCallSite(
5361           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5362     else
5363       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5364                                        AllCallSitesKnown);
5365 
5366     if (!Success)
5367       if (!askSimplifiedValueForOtherAAs(A))
5368         return indicatePessimisticFixpoint();
5369 
5370     // If a candidate was found in this update, return CHANGED.
5371     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5372                                                : ChangeStatus::CHANGED;
5373   }
5374 
5375   /// See AbstractAttribute::trackStatistics()
5376   void trackStatistics() const override {
5377     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5378   }
5379 };
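// Illustrative sketch (hypothetical IR, not from this file): if every call
// site of @f(i32 %x) passes the same simplified constant, e.g.,
//
//   call void @f(i32 7)   ; at all (known) call sites
//
// then PredForCallSite above unions the simplified call site operands and
// the argument %x is assumed to simplify to `i32 7`.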
5380 
5381 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5382   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5383       : AAValueSimplifyImpl(IRP, A) {}
5384 
5385   /// See AAValueSimplify::getAssumedSimplifiedValue()
5386   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5387     if (!isValidState())
5388       return nullptr;
5389     return SimplifiedAssociatedValue;
5390   }
5391 
5392   /// See AbstractAttribute::updateImpl(...).
5393   ChangeStatus updateImpl(Attributor &A) override {
5394     auto Before = SimplifiedAssociatedValue;
5395 
5396     auto PredForReturned = [&](Value &V) {
5397       return checkAndUpdate(A, *this,
5398                             IRPosition::value(V, getCallBaseContext()));
5399     };
5400 
5401     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5402       if (!askSimplifiedValueForOtherAAs(A))
5403         return indicatePessimisticFixpoint();
5404 
5405     // If a candidate was found in this update, return CHANGED.
5406     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5407                                                : ChangeStatus::CHANGED;
5408   }
5409 
5410   ChangeStatus manifest(Attributor &A) override {
5411     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5412 
5413     if (auto *NewV = getReplacementValue(A)) {
5414       auto PredForReturned =
5415           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5416             for (ReturnInst *RI : RetInsts) {
5417               Value *ReturnedVal = RI->getReturnValue();
5418               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5419                 return true;
5420               assert(RI->getFunction() == getAnchorScope() &&
5421                      "ReturnInst in wrong function!");
5422               LLVM_DEBUG(dbgs()
5423                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5424                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5425               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5426                 Changed = ChangeStatus::CHANGED;
5427             }
5428             return true;
5429           };
5430       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5431     }
5432 
5433     return Changed | AAValueSimplify::manifest(A);
5434   }
5435 
5436   /// See AbstractAttribute::trackStatistics()
5437   void trackStatistics() const override {
5438     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5439   }
5440 };
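// Illustrative sketch (hypothetical IR): if every reachable return of a
// function yields the same simplified value, e.g., `ret i32 0` in all exit
// blocks, the returned position simplifies to `i32 0` and manifest() above
// rewrites the return operands accordingly (operands that already match or
// are undef are left alone).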
5441 
5442 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5443   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5444       : AAValueSimplifyImpl(IRP, A) {}
5445 
5446   /// See AbstractAttribute::initialize(...).
5447   void initialize(Attributor &A) override {
5448     AAValueSimplifyImpl::initialize(A);
5449     Value &V = getAnchorValue();
5450 
5451     // TODO: add other cases
5452     if (isa<Constant>(V))
5453       indicatePessimisticFixpoint();
5454   }
5455 
5456   /// Check if \p Cmp is a comparison we can simplify.
5457   ///
5458   /// We handle multiple cases, one in which at least one operand is an
5459   /// (assumed) nullptr. If so, try to simplify the comparison using AANonNull
5460   /// on the other operand. Return true if successful; in that case
5461   /// SimplifiedAssociatedValue will be updated.
5462   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5463     auto Union = [&](Value &V) {
5464       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5465           SimplifiedAssociatedValue, &V, V.getType());
5466       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5467     };
5468 
5469     Value *LHS = Cmp.getOperand(0);
5470     Value *RHS = Cmp.getOperand(1);
5471 
5472     // Simplify the operands first.
5473     bool UsedAssumedInformation = false;
5474     const auto &SimplifiedLHS =
5475         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5476                                *this, UsedAssumedInformation);
5477     if (!SimplifiedLHS.hasValue())
5478       return true;
5479     if (!SimplifiedLHS.getValue())
5480       return false;
5481     LHS = *SimplifiedLHS;
5482 
5483     const auto &SimplifiedRHS =
5484         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5485                                *this, UsedAssumedInformation);
5486     if (!SimplifiedRHS.hasValue())
5487       return true;
5488     if (!SimplifiedRHS.getValue())
5489       return false;
5490     RHS = *SimplifiedRHS;
5491 
5492     LLVMContext &Ctx = Cmp.getContext();
5493     // Handle the trivial case first in which we don't even need to think about
5494     // null or non-null.
5495     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5496       Constant *NewVal =
5497           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5498       if (!Union(*NewVal))
5499         return false;
5500       if (!UsedAssumedInformation)
5501         indicateOptimisticFixpoint();
5502       return true;
5503     }
5504 
5505     // From now on we only handle equalities (==, !=).
5506     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5507     if (!ICmp || !ICmp->isEquality())
5508       return false;
5509 
5510     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5511     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5512     if (!LHSIsNull && !RHSIsNull)
5513       return false;
5514 
5515     // We are left with the nullptr ==/!= non-nullptr case. We'll use AANonNull
5516     // on the non-nullptr operand, and if we assume it is non-null we can
5517     // conclude the result of the comparison.
5518     assert((LHSIsNull || RHSIsNull) &&
5519            "Expected nullptr versus non-nullptr comparison at this point");
5520 
5521     // Index of the operand we assume is not null: the RHS (1) if the LHS is null.
5522     unsigned PtrIdx = LHSIsNull;
5523     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5524         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5525         DepClassTy::REQUIRED);
5526     if (!PtrNonNullAA.isAssumedNonNull())
5527       return false;
5528     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5529 
5530     // The new value depends on the predicate, true for != and false for ==.
5531     Constant *NewVal = ConstantInt::get(
5532         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5533     if (!Union(*NewVal))
5534       return false;
5535 
5536     if (!UsedAssumedInformation)
5537       indicateOptimisticFixpoint();
5538 
5539     return true;
5540   }
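  // Illustrative sketch (hypothetical IR) for handleCmp above: given
  //
  //   %c = icmp eq i8* %p, null
  //
  // and AANonNull deducing %p as (assumed) non-null, %c simplifies to
  // `i1 false` (`i1 true` for `icmp ne`). If only assumed information was
  // used, no optimistic fixpoint is indicated yet.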
5541 
5542   bool updateWithLoad(Attributor &A, LoadInst &L) {
5543     auto Union = [&](Value &V) {
5544       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5545           SimplifiedAssociatedValue, &V, L.getType());
5546       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5547     };
5548     return handleLoad(A, *this, L, Union);
5549   }
5550 
5551   /// Use the generic, non-optimistic InstSimplify functionality if we managed
5552   /// to simplify any operand of the instruction \p I. Return true if
5553   /// successful; in that case SimplifiedAssociatedValue will be updated.
5554   bool handleGenericInst(Attributor &A, Instruction &I) {
5555     bool SomeSimplified = false;
5556     bool UsedAssumedInformation = false;
5557 
5558     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5559     int Idx = 0;
5560     for (Value *Op : I.operands()) {
5561       const auto &SimplifiedOp =
5562           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5563                                  *this, UsedAssumedInformation);
5564       // If we are not sure about any operand, we are not sure about the
5565       // entire instruction either, so we'll wait.
5566       if (!SimplifiedOp.hasValue())
5567         return true;
5568 
5569       if (SimplifiedOp.getValue())
5570         NewOps[Idx] = SimplifiedOp.getValue();
5571       else
5572         NewOps[Idx] = Op;
5573 
5574       SomeSimplified |= (NewOps[Idx] != Op);
5575       ++Idx;
5576     }
5577 
5578     // We won't bother with the InstSimplify interface if we didn't simplify any
5579     // operand ourselves.
5580     if (!SomeSimplified)
5581       return false;
5582 
5583     InformationCache &InfoCache = A.getInfoCache();
5584     Function *F = I.getFunction();
5585     const auto *DT =
5586         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5587     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5588     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5589     OptimizationRemarkEmitter *ORE = nullptr;
5590 
5591     const DataLayout &DL = I.getModule()->getDataLayout();
5592     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5593     if (Value *SimplifiedI =
5594             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5595       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5596           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5597       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5598     }
5599     return false;
5600   }
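  // Illustrative sketch (hypothetical IR) for handleGenericInst above: if the
  // first operand of
  //
  //   %r = add i32 %x, %y
  //
  // is simplified to `i32 0` by the Attributor, InstSimplify is queried with
  // the new operand list and returns %y as the simplified value for %r.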
5601 
5602   /// See AbstractAttribute::updateImpl(...).
5603   ChangeStatus updateImpl(Attributor &A) override {
5604     auto Before = SimplifiedAssociatedValue;
5605 
5606     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5607                             bool Stripped) -> bool {
5608       auto &AA = A.getAAFor<AAValueSimplify>(
5609           *this, IRPosition::value(V, getCallBaseContext()),
5610           DepClassTy::REQUIRED);
5611       if (!Stripped && this == &AA) {
5612 
5613         if (auto *I = dyn_cast<Instruction>(&V)) {
5614           if (auto *LI = dyn_cast<LoadInst>(&V))
5615             if (updateWithLoad(A, *LI))
5616               return true;
5617           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5618             if (handleCmp(A, *Cmp))
5619               return true;
5620           if (handleGenericInst(A, *I))
5621             return true;
5622         }
5623         // TODO: Look at the instruction and check recursively.
5624 
5625         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5626                           << "\n");
5627         return false;
5628       }
5629       return checkAndUpdate(A, *this,
5630                             IRPosition::value(V, getCallBaseContext()));
5631     };
5632 
5633     bool Dummy = false;
5634     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5635                                      VisitValueCB, getCtxI(),
5636                                      /* UseValueSimplify */ false))
5637       if (!askSimplifiedValueForOtherAAs(A))
5638         return indicatePessimisticFixpoint();
5639 
5640     // If a candidate was found in this update, return CHANGED.
5641     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5642                                                : ChangeStatus::CHANGED;
5643   }
5644 
5645   /// See AbstractAttribute::trackStatistics()
5646   void trackStatistics() const override {
5647     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5648   }
5649 };
5650 
5651 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5652   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5653       : AAValueSimplifyImpl(IRP, A) {}
5654 
5655   /// See AbstractAttribute::initialize(...).
5656   void initialize(Attributor &A) override {
5657     SimplifiedAssociatedValue = nullptr;
5658     indicateOptimisticFixpoint();
5659   }
5660   /// See AbstractAttribute::initialize(...).
5661   ChangeStatus updateImpl(Attributor &A) override {
5662     llvm_unreachable(
5663         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5664   }
5665   /// See AbstractAttribute::trackStatistics()
5666   void trackStatistics() const override {
5667     STATS_DECLTRACK_FN_ATTR(value_simplify)
5668   }
5669 };
5670 
5671 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5672   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5673       : AAValueSimplifyFunction(IRP, A) {}
5674   /// See AbstractAttribute::trackStatistics()
5675   void trackStatistics() const override {
5676     STATS_DECLTRACK_CS_ATTR(value_simplify)
5677   }
5678 };
5679 
5680 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5681   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5682       : AAValueSimplifyImpl(IRP, A) {}
5683 
5684   void initialize(Attributor &A) override {
5685     AAValueSimplifyImpl::initialize(A);
5686     if (!getAssociatedFunction())
5687       indicatePessimisticFixpoint();
5688   }
5689 
5690   /// See AbstractAttribute::updateImpl(...).
5691   ChangeStatus updateImpl(Attributor &A) override {
5692     auto Before = SimplifiedAssociatedValue;
5693     auto &RetAA = A.getAAFor<AAReturnedValues>(
5694         *this, IRPosition::function(*getAssociatedFunction()),
5695         DepClassTy::REQUIRED);
5696     auto PredForReturned =
5697         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5698           bool UsedAssumedInformation = false;
5699           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5700               &RetVal, *cast<CallBase>(getCtxI()), *this,
5701               UsedAssumedInformation);
5702           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5703               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5704           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5705         };
5706     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5707       if (!askSimplifiedValueForOtherAAs(A))
5708         return indicatePessimisticFixpoint();
5709     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5710                                                : ChangeStatus::CHANGED;
5711   }
5712 
5713   void trackStatistics() const override {
5714     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5715   }
5716 };
5717 
5718 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5719   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5720       : AAValueSimplifyFloating(IRP, A) {}
5721 
5722   /// See AbstractAttribute::manifest(...).
5723   ChangeStatus manifest(Attributor &A) override {
5724     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5725 
5726     if (auto *NewV = getReplacementValue(A)) {
5727       Use &U = cast<CallBase>(&getAnchorValue())
5728                    ->getArgOperandUse(getCallSiteArgNo());
5729       if (A.changeUseAfterManifest(U, *NewV))
5730         Changed = ChangeStatus::CHANGED;
5731     }
5732 
5733     return Changed | AAValueSimplify::manifest(A);
5734   }
5735 
5736   void trackStatistics() const override {
5737     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5738   }
5739 };
5740 
5741 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5742 struct AAHeapToStackFunction final : public AAHeapToStack {
5743 
5744   struct AllocationInfo {
5745     /// The call that allocates the memory.
5746     CallBase *const CB;
5747 
5748     /// The kind of allocation.
5749     const enum class AllocationKind {
5750       MALLOC,
5751       CALLOC,
5752       ALIGNED_ALLOC,
5753     } Kind;
5754 
5755     /// The library function id for the allocation.
5756     LibFunc LibraryFunctionId = NotLibFunc;
5757 
5758     /// The status wrt. a rewrite.
5759     enum {
5760       STACK_DUE_TO_USE,
5761       STACK_DUE_TO_FREE,
5762       INVALID,
5763     } Status = STACK_DUE_TO_USE;
5764 
5765     /// Flag to indicate if we encountered a use that might free this allocation
5766     /// but which is not in the deallocation infos.
5767     bool HasPotentiallyFreeingUnknownUses = false;
5768 
5769     /// The set of free calls that use this allocation.
5770     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5771   };
5772 
5773   struct DeallocationInfo {
5774     /// The call that deallocates the memory.
5775     CallBase *const CB;
5776 
5777     /// Flag to indicate if we don't know all objects this deallocation might
5778     /// free.
5779     bool MightFreeUnknownObjects = false;
5780 
5781     /// The set of allocation calls that are potentially freed.
5782     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5783   };
5784 
5785   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5786       : AAHeapToStack(IRP, A) {}
5787 
5788   ~AAHeapToStackFunction() {
5789     // Ensure we call the destructor so we release any memory allocated in the
5790     // sets.
5791     for (auto &It : AllocationInfos)
5792       It.getSecond()->~AllocationInfo();
5793     for (auto &It : DeallocationInfos)
5794       It.getSecond()->~DeallocationInfo();
5795   }
5796 
5797   void initialize(Attributor &A) override {
5798     AAHeapToStack::initialize(A);
5799 
5800     const Function *F = getAnchorScope();
5801     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5802 
5803     auto AllocationIdentifierCB = [&](Instruction &I) {
5804       CallBase *CB = dyn_cast<CallBase>(&I);
5805       if (!CB)
5806         return true;
5807       if (isFreeCall(CB, TLI)) {
5808         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5809         return true;
5810       }
5811       bool IsMalloc = isMallocLikeFn(CB, TLI);
5812       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5813       bool IsCalloc =
5814           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5815       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5816         return true;
5817       auto Kind =
5818           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5819                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5820                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5821 
5822       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5823       AllocationInfos[CB] = AI;
5824       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5825       return true;
5826     };
5827 
5828     bool UsedAssumedInformation = false;
5829     bool Success = A.checkForAllCallLikeInstructions(
5830         AllocationIdentifierCB, *this, UsedAssumedInformation,
5831         /* CheckBBLivenessOnly */ false,
5832         /* CheckPotentiallyDead */ true);
5833     (void)Success;
5834     assert(Success && "Did not expect the call base visit callback to fail!");
5835   }
5836 
5837   const std::string getAsStr() const override {
5838     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5839     for (const auto &It : AllocationInfos) {
5840       if (It.second->Status == AllocationInfo::INVALID)
5841         ++NumInvalidMallocs;
5842       else
5843         ++NumH2SMallocs;
5844     }
5845     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5846            std::to_string(NumInvalidMallocs);
5847   }
5848 
5849   /// See AbstractAttribute::trackStatistics().
5850   void trackStatistics() const override {
5851     STATS_DECL(
5852         MallocCalls, Function,
5853         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5854     for (auto &It : AllocationInfos)
5855       if (It.second->Status != AllocationInfo::INVALID)
5856         ++BUILD_STAT_NAME(MallocCalls, Function);
5857   }
5858 
5859   bool isAssumedHeapToStack(const CallBase &CB) const override {
5860     if (isValidState())
5861       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5862         return AI->Status != AllocationInfo::INVALID;
5863     return false;
5864   }
5865 
5866   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5867     if (!isValidState())
5868       return false;
5869 
5870     for (auto &It : AllocationInfos) {
5871       AllocationInfo &AI = *It.second;
5872       if (AI.Status == AllocationInfo::INVALID)
5873         continue;
5874 
5875       if (AI.PotentialFreeCalls.count(&CB))
5876         return true;
5877     }
5878 
5879     return false;
5880   }
5881 
5882   ChangeStatus manifest(Attributor &A) override {
5883     assert(getState().isValidState() &&
5884            "Attempted to manifest an invalid state!");
5885 
5886     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5887     Function *F = getAnchorScope();
5888     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5889 
5890     for (auto &It : AllocationInfos) {
5891       AllocationInfo &AI = *It.second;
5892       if (AI.Status == AllocationInfo::INVALID)
5893         continue;
5894 
5895       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5896         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5897         A.deleteAfterManifest(*FreeCall);
5898         HasChanged = ChangeStatus::CHANGED;
5899       }
5900 
5901       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5902                         << "\n");
5903 
5904       auto Remark = [&](OptimizationRemark OR) {
5905         LibFunc IsAllocShared;
5906         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5907           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5908             return OR << "Moving globalized variable to the stack.";
5909         return OR << "Moving memory allocation from the heap to the stack.";
5910       };
5911       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5912         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5913       else
5914         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5915 
5916       Value *Size;
5917       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5918       if (SizeAPI.hasValue()) {
5919         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5920       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5921         auto *Num = AI.CB->getOperand(0);
5922         auto *SizeT = AI.CB->getOperand(1);
5923         IRBuilder<> B(AI.CB);
5924         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5925       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5926         Size = AI.CB->getOperand(1);
5927       } else {
5928         Size = AI.CB->getOperand(0);
5929       }
5930 
5931       Align Alignment(1);
5932       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5933         Optional<APInt> AlignmentAPI =
5934             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5935         assert(AlignmentAPI.hasValue() &&
5936                "Expected an alignment during manifest!");
5937         Alignment =
5938             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5939       }
5940 
5941       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5942       Instruction *Alloca =
5943           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5944                          "", AI.CB->getNextNode());
5945 
5946       if (Alloca->getType() != AI.CB->getType())
5947         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5948                                  Alloca->getNextNode());
5949 
5950       A.changeValueAfterManifest(*AI.CB, *Alloca);
5951 
5952       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5953         auto *NBB = II->getNormalDest();
5954         BranchInst::Create(NBB, AI.CB->getParent());
5955         A.deleteAfterManifest(*AI.CB);
5956       } else {
5957         A.deleteAfterManifest(*AI.CB);
5958       }
5959 
5960       // Zero out the allocated memory if it was a calloc.
5961       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5962         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5963                                    Alloca->getNextNode());
5964         Value *Ops[] = {
5965             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5966             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5967 
5968         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5969         Module *M = F->getParent();
5970         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5971         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5972       }
5973       HasChanged = ChangeStatus::CHANGED;
5974     }
5975 
5976     return HasChanged;
5977   }
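  // Illustrative before/after sketch (hypothetical IR) of the rewrite done by
  // manifest() above:
  //
  //   %p = call i8* @malloc(i64 32)   ; heap allocation
  //   call void @free(i8* %p)         ; matching free
  //
  // becomes
  //
  //   %p = alloca i8, i64 32          ; stack allocation, free call deleted
  //
  // For calloc-like allocations a memset intrinsic is additionally emitted to
  // zero the new alloca.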
5978 
5979   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5980                            Value &V) {
5981     bool UsedAssumedInformation = false;
5982     Optional<Constant *> SimpleV =
5983         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5984     if (!SimpleV.hasValue())
5985       return APInt(64, 0);
5986     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5987       return CI->getValue();
5988     return llvm::None;
5989   }
5990 
5991   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
5992                           AllocationInfo &AI) {
5993 
5994     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
5995       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
5996 
5997     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
5998       // We only return a size if the alignment is also constant.
5999       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
6000                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
6001                  : llvm::None;
6002 
6003     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
6004            "Expected only callocs are left");
6005     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
6006     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
6007     if (!Num.hasValue() || !Size.hasValue())
6008       return llvm::None;
6009     bool Overflow = false;
6010     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
6011     return Overflow ? llvm::None : Size;
6012   }
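  // Worked example (hypothetical operands) for getSize above: for
  // `calloc(4, 8)` the two constants are multiplied with overflow detection,
  // i.e., 8 umul_ov 4 == 32 with Overflow == false, so 32 is returned; if the
  // unsigned multiplication overflows, llvm::None is returned and the
  // allocation is not converted.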
6013 
6014   /// Collection of all malloc-like calls in a function with associated
6015   /// information.
6016   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6017 
6018   /// Collection of all free-like calls in a function with associated
6019   /// information.
6020   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6021 
6022   ChangeStatus updateImpl(Attributor &A) override;
6023 };
6024 
6025 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6026   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6027   const Function *F = getAnchorScope();
6028 
6029   const auto &LivenessAA =
6030       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6031 
6032   MustBeExecutedContextExplorer &Explorer =
6033       A.getInfoCache().getMustBeExecutedContextExplorer();
6034 
6035   bool StackIsAccessibleByOtherThreads =
6036       A.getInfoCache().stackIsAccessibleByOtherThreads();
6037 
6038   // Flag to ensure we update our deallocation information at most once per
6039   // updateImpl call and only if we use the free check reasoning.
6040   bool HasUpdatedFrees = false;
6041 
6042   auto UpdateFrees = [&]() {
6043     HasUpdatedFrees = true;
6044 
6045     for (auto &It : DeallocationInfos) {
6046       DeallocationInfo &DI = *It.second;
6047       // For now we cannot use deallocations that have unknown inputs; skip
6048       // them.
6049       if (DI.MightFreeUnknownObjects)
6050         continue;
6051 
6052       // No need to analyze dead calls; ignore them instead.
6053       bool UsedAssumedInformation = false;
6054       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6055                           /* CheckBBLivenessOnly */ true))
6056         continue;
6057 
6058       // Use the optimistic version to get the freed objects, ignoring dead
6059       // branches etc.
6060       SmallVector<Value *, 8> Objects;
6061       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6062                                            *this, DI.CB)) {
6063         LLVM_DEBUG(
6064             dbgs()
6065             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6066         DI.MightFreeUnknownObjects = true;
6067         continue;
6068       }
6069 
6070       // Check each object explicitly.
6071       for (auto *Obj : Objects) {
6072         // Free of null and undef can be ignored as no-ops (or UB in the latter
6073         // case).
6074         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6075           continue;
6076 
6077         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6078         if (!ObjCB) {
6079           LLVM_DEBUG(dbgs()
6080                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6081           DI.MightFreeUnknownObjects = true;
6082           continue;
6083         }
6084 
6085         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6086         if (!AI) {
6087           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6088                             << "\n");
6089           DI.MightFreeUnknownObjects = true;
6090           continue;
6091         }
6092 
6093         DI.PotentialAllocationCalls.insert(ObjCB);
6094       }
6095     }
6096   };
6097 
6098   auto FreeCheck = [&](AllocationInfo &AI) {
6099     // If the stack is not accessible by other threads, the "must-free" logic
6100     // doesn't apply as the pointer could be shared and needs to be placed in
6101     // "shareable" memory.
6102     if (!StackIsAccessibleByOtherThreads) {
6103       auto &NoSyncAA =
6104           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6105       if (!NoSyncAA.isAssumedNoSync()) {
6106         LLVM_DEBUG(
6107             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6108                       "other threads and function is not nosync:\n");
6109         return false;
6110       }
6111     }
6112     if (!HasUpdatedFrees)
6113       UpdateFrees();
6114 
6115     // TODO: Allow multi-exit functions that have different free calls.
6116     if (AI.PotentialFreeCalls.size() != 1) {
6117       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6118                         << AI.PotentialFreeCalls.size() << "\n");
6119       return false;
6120     }
6121     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6122     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6123     if (!DI) {
6124       LLVM_DEBUG(
6125           dbgs() << "[H2S] unique free call was not known as deallocation call "
6126                  << *UniqueFree << "\n");
6127       return false;
6128     }
6129     if (DI->MightFreeUnknownObjects) {
6130       LLVM_DEBUG(
6131           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6132       return false;
6133     }
6134     if (DI->PotentialAllocationCalls.size() > 1) {
6135       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6136                         << DI->PotentialAllocationCalls.size()
6137                         << " different allocations\n");
6138       return false;
6139     }
6140     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6141       LLVM_DEBUG(
6142           dbgs()
6143           << "[H2S] unique free call not known to free this allocation but "
6144           << **DI->PotentialAllocationCalls.begin() << "\n");
6145       return false;
6146     }
6147     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6148     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6149       LLVM_DEBUG(
6150           dbgs()
6151           << "[H2S] unique free call might not be executed with the allocation "
6152           << *UniqueFree << "\n");
6153       return false;
6154     }
6155     return true;
6156   };
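  // Illustrative sketch (hypothetical IR): FreeCheck above rejects an
  // allocation whose unique free is only conditionally executed, e.g.,
  //
  //   %p = call i8* @malloc(i64 8)
  //   br i1 %c, label %do_free, label %exit
  //
  // because the must-be-executed context of the allocation does not contain
  // the free call.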
6157 
6158   auto UsesCheck = [&](AllocationInfo &AI) {
6159     bool ValidUsesOnly = true;
6160 
6161     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6162       Instruction *UserI = cast<Instruction>(U.getUser());
6163       if (isa<LoadInst>(UserI))
6164         return true;
6165       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6166         if (SI->getValueOperand() == U.get()) {
6167           LLVM_DEBUG(dbgs()
6168                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6169           ValidUsesOnly = false;
6170         } else {
6171           // A store into the malloc'ed memory is fine.
6172         }
6173         return true;
6174       }
6175       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6176         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6177           return true;
6178         if (DeallocationInfos.count(CB)) {
6179           AI.PotentialFreeCalls.insert(CB);
6180           return true;
6181         }
6182 
6183         unsigned ArgNo = CB->getArgOperandNo(&U);
6184 
6185         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6186             *this, IRPosition::callsite_argument(*CB, ArgNo),
6187             DepClassTy::OPTIONAL);
6188 
6189         // If a call site argument use is nofree, we are fine.
6190         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6191             *this, IRPosition::callsite_argument(*CB, ArgNo),
6192             DepClassTy::OPTIONAL);
6193 
6194         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6195         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6196         if (MaybeCaptured ||
6197             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6198              MaybeFreed)) {
6199           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6200 
6201           // Emit a missed remark if this is missed OpenMP globalization.
6202           auto Remark = [&](OptimizationRemarkMissed ORM) {
6203             return ORM
6204                    << "Could not move globalized variable to the stack. "
6205                       "Variable is potentially captured in call. Mark "
6206                       "parameter as `__attribute__((noescape))` to override.";
6207           };
6208 
6209           if (ValidUsesOnly &&
6210               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6211             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6212 
6213           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6214           ValidUsesOnly = false;
6215         }
6216         return true;
6217       }
6218 
6219       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6220           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6221         Follow = true;
6222         return true;
6223       }
6224       // Unknown user for which we cannot track uses further (in a way that
6225       // makes sense).
6226       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6227       ValidUsesOnly = false;
6228       return true;
6229     };
6230     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6231       return false;
6232     return ValidUsesOnly;
6233   };
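  // Illustrative sketch (hypothetical IR) for UsesCheck above: a store *of*
  // the pointer escapes it and blocks the conversion, while a store *into*
  // the allocated memory is harmless:
  //
  //   store i8* %p, i8** @g   ; escaping store, ValidUsesOnly becomes false
  //   store i8 0, i8* %p      ; store into the allocation, fine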
6234 
6235   // The actual update starts here. We look at all allocations and depending on
6236   // their status perform the appropriate check(s).
6237   for (auto &It : AllocationInfos) {
6238     AllocationInfo &AI = *It.second;
6239     if (AI.Status == AllocationInfo::INVALID)
6240       continue;
6241 
6242     if (MaxHeapToStackSize == -1) {
6243       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6244         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6245           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6246                             << "\n");
6247           AI.Status = AllocationInfo::INVALID;
6248           Changed = ChangeStatus::CHANGED;
6249           continue;
6250         }
6251     } else {
6252       Optional<APInt> Size = getSize(A, *this, AI);
6253       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6254         LLVM_DEBUG({
6255           if (!Size.hasValue())
6256             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6257                    << "\n";
6258           else
6259             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6260                    << MaxHeapToStackSize << "\n";
6261         });
6262 
6263         AI.Status = AllocationInfo::INVALID;
6264         Changed = ChangeStatus::CHANGED;
6265         continue;
6266       }
6267     }
6268 
6269     switch (AI.Status) {
6270     case AllocationInfo::STACK_DUE_TO_USE:
6271       if (UsesCheck(AI))
6272         continue;
6273       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6274       LLVM_FALLTHROUGH;
6275     case AllocationInfo::STACK_DUE_TO_FREE:
6276       if (FreeCheck(AI))
6277         continue;
6278       AI.Status = AllocationInfo::INVALID;
6279       Changed = ChangeStatus::CHANGED;
6280       continue;
6281     case AllocationInfo::INVALID:
6282       llvm_unreachable("Invalid allocations should never reach this point!");
6283     };
6284   }
6285 
6286   return Changed;
6287 }
6288 
6289 /// ----------------------- Privatizable Pointers ------------------------------
6290 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6291   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6292       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6293 
6294   ChangeStatus indicatePessimisticFixpoint() override {
6295     AAPrivatizablePtr::indicatePessimisticFixpoint();
6296     PrivatizableType = nullptr;
6297     return ChangeStatus::CHANGED;
6298   }
6299 
6300   /// Identify the type we can choose for a private copy of the underlying
6301   /// argument. None means it is not clear yet; nullptr means there is none.
6302   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6303 
6304   /// Return a privatizable type that encloses both T0 and T1.
6305   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6306   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6307     if (!T0.hasValue())
6308       return T1;
6309     if (!T1.hasValue())
6310       return T0;
6311     if (T0 == T1)
6312       return T0;
6313     return nullptr;
6314   }
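  // Worked example: combineTypes(None, i32) == i32, combineTypes(i32, i32) ==
  // i32, and combineTypes(i32, i64) == nullptr, i.e., call sites that
  // disagree on the type make the argument non-privatizable for now.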
6315 
6316   Optional<Type *> getPrivatizableType() const override {
6317     return PrivatizableType;
6318   }
6319 
6320   const std::string getAsStr() const override {
6321     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6322   }
6323 
6324 protected:
6325   Optional<Type *> PrivatizableType;
6326 };
6327 
6328 // TODO: Do this for call site arguments (probably also other values) as well.
6329 
6330 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6331   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6332       : AAPrivatizablePtrImpl(IRP, A) {}
6333 
6334   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6335   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6336     // If this is a byval argument and we know all the call sites (so we can
6337     // rewrite them), there is no need to check them explicitly.
6338     bool AllCallSitesKnown;
6339     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6340         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6341                                true, AllCallSitesKnown))
6342       return getAssociatedValue().getType()->getPointerElementType();
6343 
6344     Optional<Type *> Ty;
6345     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6346 
6347     // Make sure the associated call site argument has the same type at all
6348     // call sites and that it is an allocation we know is safe to privatize;
6349     // for now that means we only allow alloca instructions.
6350     // TODO: We can additionally analyze the accesses in the callee to create
6351     //       the type from that information instead. That is a little more
6352     //       involved and will be done in a follow-up patch.
6353     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6354       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6355       // Check if a corresponding argument was found or if it is one not
6356       // associated (which can happen for callback calls).
6357       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6358         return false;
6359 
6360       // Check that all call sites agree on a type.
6361       auto &PrivCSArgAA =
6362           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6363       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6364 
6365       LLVM_DEBUG({
6366         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6367         if (CSTy.hasValue() && CSTy.getValue())
6368           CSTy.getValue()->print(dbgs());
6369         else if (CSTy.hasValue())
6370           dbgs() << "<nullptr>";
6371         else
6372           dbgs() << "<none>";
6373       });
6374 
6375       Ty = combineTypes(Ty, CSTy);
6376 
6377       LLVM_DEBUG({
6378         dbgs() << " : New Type: ";
6379         if (Ty.hasValue() && Ty.getValue())
6380           Ty.getValue()->print(dbgs());
6381         else if (Ty.hasValue())
6382           dbgs() << "<nullptr>";
6383         else
6384           dbgs() << "<none>";
6385         dbgs() << "\n";
6386       });
6387 
6388       return !Ty.hasValue() || Ty.getValue();
6389     };
6390 
6391     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6392       return nullptr;
6393     return Ty;
6394   }
6395 
6396   /// See AbstractAttribute::updateImpl(...).
6397   ChangeStatus updateImpl(Attributor &A) override {
6398     PrivatizableType = identifyPrivatizableType(A);
6399     if (!PrivatizableType.hasValue())
6400       return ChangeStatus::UNCHANGED;
6401     if (!PrivatizableType.getValue())
6402       return indicatePessimisticFixpoint();
6403 
6404     // The dependence is optional so we don't give up once we give up on the
6405     // alignment.
6406     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6407                         DepClassTy::OPTIONAL);
6408 
6409     // Avoid arguments with padding for now.
6410     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6411         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6412                                                 A.getInfoCache().getDL())) {
6413       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6414       return indicatePessimisticFixpoint();
6415     }
6416 
6417     // Verify callee and caller agree on how the promoted argument would be
6418     // passed.
6419     // TODO: The use of the ArgumentPromotion interface here is ugly; we need a
6420     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6421     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6422     Function &Fn = *getIRPosition().getAnchorScope();
6423     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6424     ArgsToPromote.insert(getAssociatedArgument());
6425     const auto *TTI =
6426         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6427     if (!TTI ||
6428         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6429             Fn, *TTI, ArgsToPromote, Dummy) ||
6430         ArgsToPromote.empty()) {
6431       LLVM_DEBUG(
6432           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6433                  << Fn.getName() << "\n");
6434       return indicatePessimisticFixpoint();
6435     }
6436 
6437     // Collect the types that will replace the privatizable type in the function
6438     // signature.
6439     SmallVector<Type *, 16> ReplacementTypes;
6440     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6441 
6442     // Register a rewrite of the argument.
6443     Argument *Arg = getAssociatedArgument();
6444     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6445       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6446       return indicatePessimisticFixpoint();
6447     }
6448 
6449     unsigned ArgNo = Arg->getArgNo();
6450 
6451     // Helper to check if for the given call site the associated argument is
6452     // passed to a callback where the privatization would be different.
6453     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6454       SmallVector<const Use *, 4> CallbackUses;
6455       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6456       for (const Use *U : CallbackUses) {
6457         AbstractCallSite CBACS(U);
6458         assert(CBACS && CBACS.isCallbackCall());
6459         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6460           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6461 
6462           LLVM_DEBUG({
6463             dbgs()
6464                 << "[AAPrivatizablePtr] Argument " << *Arg
6465                 << "check if can be privatized in the context of its parent ("
6466                 << Arg->getParent()->getName()
6467                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6468                    "callback ("
6469                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6470                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6471                 << CBACS.getCallArgOperand(CBArg) << " vs "
6472                 << CB.getArgOperand(ArgNo) << "\n"
6473                 << "[AAPrivatizablePtr] " << CBArg << " : "
6474                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6475           });
6476 
6477           if (CBArgNo != int(ArgNo))
6478             continue;
6479           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6480               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6481           if (CBArgPrivAA.isValidState()) {
6482             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6483             if (!CBArgPrivTy.hasValue())
6484               continue;
6485             if (CBArgPrivTy.getValue() == PrivatizableType)
6486               continue;
6487           }
6488 
6489           LLVM_DEBUG({
6490             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6491                    << " cannot be privatized in the context of its parent ("
6492                    << Arg->getParent()->getName()
6493                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6494                       "callback ("
6495                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6496                    << ").\n[AAPrivatizablePtr] for which the argument "
6497                       "privatization is not compatible.\n";
6498           });
6499           return false;
6500         }
6501       }
6502       return true;
6503     };
6504 
6505     // Helper to check if for the given call site the associated argument is
6506     // passed to a direct call where the privatization would be different.
6507     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6508       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6509       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6510       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6511              "Expected a direct call operand for callback call operand");
6512 
6513       LLVM_DEBUG({
6514         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6515                << " check if be privatized in the context of its parent ("
6516                << Arg->getParent()->getName()
6517                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6518                   "direct call of ("
6519                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6520                << ").\n";
6521       });
6522 
6523       Function *DCCallee = DC->getCalledFunction();
6524       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6525         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6526             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6527             DepClassTy::REQUIRED);
6528         if (DCArgPrivAA.isValidState()) {
6529           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6530           if (!DCArgPrivTy.hasValue())
6531             return true;
6532           if (DCArgPrivTy.getValue() == PrivatizableType)
6533             return true;
6534         }
6535       }
6536 
6537       LLVM_DEBUG({
6538         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6539                << " cannot be privatized in the context of its parent ("
6540                << Arg->getParent()->getName()
6541                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6542                   "direct call of ("
6543                << ACS.getInstruction()->getCalledFunction()->getName()
6544                << ").\n[AAPrivatizablePtr] for which the argument "
6545                   "privatization is not compatible.\n";
6546       });
6547       return false;
6548     };
6549 
6550     // Helper to check if the associated argument is used at the given abstract
6551     // call site in a way that is incompatible with the privatization assumed
6552     // here.
6553     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6554       if (ACS.isDirectCall())
6555         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6556       if (ACS.isCallbackCall())
6557         return IsCompatiblePrivArgOfDirectCS(ACS);
6558       return false;
6559     };
6560 
6561     bool AllCallSitesKnown;
6562     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6563                                 AllCallSitesKnown))
6564       return indicatePessimisticFixpoint();
6565 
6566     return ChangeStatus::UNCHANGED;
6567   }
6568 
6569   /// Given a type to privatize \p PrivType, collect the constituent types
6570   /// (which are used) in \p ReplacementTypes.
6571   static void
6572   identifyReplacementTypes(Type *PrivType,
6573                            SmallVectorImpl<Type *> &ReplacementTypes) {
6574     // TODO: For now we expand the privatization type to the fullest, which can
6575     //       lead to dead arguments that need to be removed later.
6576     assert(PrivType && "Expected privatizable type!");
6577 
6578     // Traverse the type, extract constituent types on the outermost level.
6579     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6580       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6581         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6582     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6583       ReplacementTypes.append(PrivArrayType->getNumElements(),
6584                               PrivArrayType->getElementType());
6585     } else {
6586       ReplacementTypes.push_back(PrivType);
6587     }
6588   }
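  // Worked example: for PrivType == { i32, i64 } the replacement types are
  // i32 and i64; for [4 x float] they are four float entries; for a scalar
  // such as i32 it is the single type itself.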
6589 
6590   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6591   /// The values needed are taken from the arguments of \p F starting at
6592   /// position \p ArgNo.
6593   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6594                                    unsigned ArgNo, Instruction &IP) {
6595     assert(PrivType && "Expected privatizable type!");
6596 
6597     IRBuilder<NoFolder> IRB(&IP);
6598     const DataLayout &DL = F.getParent()->getDataLayout();
6599 
6600     // Traverse the type, build GEPs and stores.
6601     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6602       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6603       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6604         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6605         Value *Ptr =
6606             constructPointer(PointeeTy, PrivType, &Base,
6607                              PrivStructLayout->getElementOffset(u), IRB, DL);
6608         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6609       }
6610     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6611       Type *PointeeTy = PrivArrayType->getElementType();
6612       Type *PointeePtrTy = PointeeTy->getPointerTo();
6613       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6614       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6615         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6616                                       u * PointeeTySize, IRB, DL);
6617         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6618       }
6619     } else {
6620       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6621     }
6622   }
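  // Illustrative sketch (pseudo-IR, offsets assume the default struct layout
  // of the DataLayout) for PrivType == { i32, i64 } with replacement
  // arguments %a0 and %a1, createInitialization above emits roughly:
  //
  //   store i32 %a0, i32* <pointer to offset 0 of %base>
  //   store i64 %a1, i64* <pointer to offset 8 of %base>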
6623 
6624   /// Extract values from \p Base according to the type \p PrivType at the
6625   /// call position \p ACS. The values are appended to \p ReplacementValues.
6626   void createReplacementValues(Align Alignment, Type *PrivType,
6627                                AbstractCallSite ACS, Value *Base,
6628                                SmallVectorImpl<Value *> &ReplacementValues) {
6629     assert(Base && "Expected base value!");
6630     assert(PrivType && "Expected privatizable type!");
6631     Instruction *IP = ACS.getInstruction();
6632 
6633     IRBuilder<NoFolder> IRB(IP);
6634     const DataLayout &DL = IP->getModule()->getDataLayout();
6635 
6636     if (Base->getType()->getPointerElementType() != PrivType)
6637       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6638                                                  "", ACS.getInstruction());
6639 
6640     // Traverse the type, build GEPs and loads.
6641     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6642       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6643       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6644         Type *PointeeTy = PrivStructType->getElementType(u);
6645         Value *Ptr =
6646             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6647                              PrivStructLayout->getElementOffset(u), IRB, DL);
6648         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6649         L->setAlignment(Alignment);
6650         ReplacementValues.push_back(L);
6651       }
6652     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6653       Type *PointeeTy = PrivArrayType->getElementType();
6654       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6655       Type *PointeePtrTy = PointeeTy->getPointerTo();
6656       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6657         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6658                                       u * PointeeTySize, IRB, DL);
6659         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6660         L->setAlignment(Alignment);
6661         ReplacementValues.push_back(L);
6662       }
6663     } else {
6664       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6665       L->setAlignment(Alignment);
6666       ReplacementValues.push_back(L);
6667     }
6668   }
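
  // The call-site counterpart, sketched for the same hypothetical
  // { i32, i64 } example: each element is loaded from the passed-in pointer
  // right before the call and forwarded as a scalar operand:
  //
  //   %e0 = load i32, i32* %f0, align A   ; A is the assumed alignment
  //   %e1 = load i64, i64* %f1, align A
  //   call void @callee(i32 %e0, i64 %e1)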
6669 
6670   /// See AbstractAttribute::manifest(...)
6671   ChangeStatus manifest(Attributor &A) override {
6672     if (!PrivatizableType.hasValue())
6673       return ChangeStatus::UNCHANGED;
6674     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6675 
6676     // Collect all tail calls in the function as we cannot allow new allocas to
6677     // escape into tail recursion.
6678     // TODO: Be smarter about new allocas escaping into tail calls.
6679     SmallVector<CallInst *, 16> TailCalls;
6680     bool UsedAssumedInformation = false;
6681     if (!A.checkForAllInstructions(
6682             [&](Instruction &I) {
6683               CallInst &CI = cast<CallInst>(I);
6684               if (CI.isTailCall())
6685                 TailCalls.push_back(&CI);
6686               return true;
6687             },
6688             *this, {Instruction::Call}, UsedAssumedInformation))
6689       return ChangeStatus::UNCHANGED;
6690 
6691     Argument *Arg = getAssociatedArgument();
6692     // Query the AAAlign attribute for the alignment of the associated
6693     // argument to determine the best alignment of loads.
6694     const auto &AlignAA =
6695         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6696 
6697     // Callback to repair the associated function. A new alloca is placed at the
6698     // beginning and initialized with the values passed through arguments. The
6699     // new alloca replaces all uses of the old pointer argument.
6700     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6701         [=](const Attributor::ArgumentReplacementInfo &ARI,
6702             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6703           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6704           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6705           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6706                                            Arg->getName() + ".priv", IP);
6707           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6708                                ArgIt->getArgNo(), *IP);
6709 
6710           if (AI->getType() != Arg->getType())
6711             AI =
6712                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6713           Arg->replaceAllUsesWith(AI);
6714 
6715           for (CallInst *CI : TailCalls)
6716             CI->setTailCall(false);
6717         };
6718 
6719     // Callback to repair a call site of the associated function. The elements
6720     // of the privatizable type are loaded prior to the call and passed to the
6721     // new function version.
6722     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6723         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6724                       AbstractCallSite ACS,
6725                       SmallVectorImpl<Value *> &NewArgOperands) {
6726           // When no alignment is specified for the load instruction,
6727           // natural alignment is assumed.
6728           createReplacementValues(
6729               assumeAligned(AlignAA.getAssumedAlign()),
6730               PrivatizableType.getValue(), ACS,
6731               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6732               NewArgOperands);
6733         };
6734 
6735     // Collect the types that will replace the privatizable type in the function
6736     // signature.
6737     SmallVector<Type *, 16> ReplacementTypes;
6738     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6739 
6740     // Register a rewrite of the argument.
6741     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6742                                            std::move(FnRepairCB),
6743                                            std::move(ACSRepairCB)))
6744       return ChangeStatus::CHANGED;
6745     return ChangeStatus::UNCHANGED;
6746   }
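
  // End to end, a successful rewrite behaves roughly like this pseudo-C
  // sketch (names hypothetical, layout simplified):
  //
  //   // before:                    // after:
  //   void fn(S *p) { use(p); }     void fn(int a, long b) {
  //   fn(&s);                         S priv = {a, b}; use(&priv);
  //                                 }
  //                                 fn(s.a, s.b);
  //
  // Tail-call markers in the callee are dropped because the new alloca must
  // not escape into a tail call (see the TailCalls handling above).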
6747 
6748   /// See AbstractAttribute::trackStatistics()
6749   void trackStatistics() const override {
6750     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6751   }
6752 };
6753 
6754 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6755   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6756       : AAPrivatizablePtrImpl(IRP, A) {}
6757 
6758   /// See AbstractAttribute::initialize(...).
6759   virtual void initialize(Attributor &A) override {
6760     // TODO: We can privatize more than arguments.
6761     indicatePessimisticFixpoint();
6762   }
6763 
6764   ChangeStatus updateImpl(Attributor &A) override {
6765     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6766                      "updateImpl will not be called");
6767   }
6768 
6769   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6770   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6771     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6772     if (!Obj) {
6773       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6774       return nullptr;
6775     }
6776 
6777     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6778       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6779         if (CI->isOne())
6780           return Obj->getType()->getPointerElementType();
6781     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6782       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6783           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6784       if (PrivArgAA.isAssumedPrivatizablePtr())
6785         return Obj->getType()->getPointerElementType();
6786     }
6787 
6788     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6789                          "alloca nor privatizable argument: "
6790                       << *Obj << "!\n");
6791     return nullptr;
6792   }
6793 
6794   /// See AbstractAttribute::trackStatistics()
6795   void trackStatistics() const override {
6796     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6797   }
6798 };
6799 
6800 struct AAPrivatizablePtrCallSiteArgument final
6801     : public AAPrivatizablePtrFloating {
6802   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6803       : AAPrivatizablePtrFloating(IRP, A) {}
6804 
6805   /// See AbstractAttribute::initialize(...).
6806   void initialize(Attributor &A) override {
6807     if (getIRPosition().hasAttr(Attribute::ByVal))
6808       indicateOptimisticFixpoint();
6809   }
6810 
6811   /// See AbstractAttribute::updateImpl(...).
6812   ChangeStatus updateImpl(Attributor &A) override {
6813     PrivatizableType = identifyPrivatizableType(A);
6814     if (!PrivatizableType.hasValue())
6815       return ChangeStatus::UNCHANGED;
6816     if (!PrivatizableType.getValue())
6817       return indicatePessimisticFixpoint();
6818 
6819     const IRPosition &IRP = getIRPosition();
6820     auto &NoCaptureAA =
6821         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6822     if (!NoCaptureAA.isAssumedNoCapture()) {
6823       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6824       return indicatePessimisticFixpoint();
6825     }
6826 
6827     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6828     if (!NoAliasAA.isAssumedNoAlias()) {
6829       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6830       return indicatePessimisticFixpoint();
6831     }
6832 
6833     const auto &MemBehaviorAA =
6834         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6835     if (!MemBehaviorAA.isAssumedReadOnly()) {
6836       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6837       return indicatePessimisticFixpoint();
6838     }
6839 
6840     return ChangeStatus::UNCHANGED;
6841   }
6842 
6843   /// See AbstractAttribute::trackStatistics()
6844   void trackStatistics() const override {
6845     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6846   }
6847 };
6848 
6849 struct AAPrivatizablePtrCallSiteReturned final
6850     : public AAPrivatizablePtrFloating {
6851   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6852       : AAPrivatizablePtrFloating(IRP, A) {}
6853 
6854   /// See AbstractAttribute::initialize(...).
6855   void initialize(Attributor &A) override {
6856     // TODO: We can privatize more than arguments.
6857     indicatePessimisticFixpoint();
6858   }
6859 
6860   /// See AbstractAttribute::trackStatistics()
6861   void trackStatistics() const override {
6862     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6863   }
6864 };
6865 
6866 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6867   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6868       : AAPrivatizablePtrFloating(IRP, A) {}
6869 
6870   /// See AbstractAttribute::initialize(...).
6871   void initialize(Attributor &A) override {
6872     // TODO: We can privatize more than arguments.
6873     indicatePessimisticFixpoint();
6874   }
6875 
6876   /// See AbstractAttribute::trackStatistics()
6877   void trackStatistics() const override {
6878     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6879   }
6880 };
6881 
6882 /// -------------------- Memory Behavior Attributes ----------------------------
6883 /// Includes read-none, read-only, and write-only.
6884 /// ----------------------------------------------------------------------------
6885 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6886   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6887       : AAMemoryBehavior(IRP, A) {}
6888 
6889   /// See AbstractAttribute::initialize(...).
6890   void initialize(Attributor &A) override {
6891     intersectAssumedBits(BEST_STATE);
6892     getKnownStateFromValue(getIRPosition(), getState());
6893     AAMemoryBehavior::initialize(A);
6894   }
6895 
6896   /// Return the memory behavior information encoded in the IR for \p IRP.
6897   static void getKnownStateFromValue(const IRPosition &IRP,
6898                                      BitIntegerState &State,
6899                                      bool IgnoreSubsumingPositions = false) {
6900     SmallVector<Attribute, 2> Attrs;
6901     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6902     for (const Attribute &Attr : Attrs) {
6903       switch (Attr.getKindAsEnum()) {
6904       case Attribute::ReadNone:
6905         State.addKnownBits(NO_ACCESSES);
6906         break;
6907       case Attribute::ReadOnly:
6908         State.addKnownBits(NO_WRITES);
6909         break;
6910       case Attribute::WriteOnly:
6911         State.addKnownBits(NO_READS);
6912         break;
6913       default:
6914         llvm_unreachable("Unexpected attribute!");
6915       }
6916     }
6917 
6918     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6919       if (!I->mayReadFromMemory())
6920         State.addKnownBits(NO_READS);
6921       if (!I->mayWriteToMemory())
6922         State.addKnownBits(NO_WRITES);
6923     }
6924   }
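
  // For example, an argument already carrying `readonly` in the IR enters
  // the deduction with NO_WRITES known, and an instruction that cannot read
  // memory starts with NO_READS known; the fixpoint iteration can then only
  // refine the assumed bits on top of what is known.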
6925 
6926   /// See AbstractAttribute::getDeducedAttributes(...).
6927   void getDeducedAttributes(LLVMContext &Ctx,
6928                             SmallVectorImpl<Attribute> &Attrs) const override {
6929     assert(Attrs.size() == 0);
6930     if (isAssumedReadNone())
6931       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6932     else if (isAssumedReadOnly())
6933       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6934     else if (isAssumedWriteOnly())
6935       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6936     assert(Attrs.size() <= 1);
6937   }
6938 
6939   /// See AbstractAttribute::manifest(...).
6940   ChangeStatus manifest(Attributor &A) override {
6941     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6942       return ChangeStatus::UNCHANGED;
6943 
6944     const IRPosition &IRP = getIRPosition();
6945 
6946     // Check if we would improve the existing attributes first.
6947     SmallVector<Attribute, 4> DeducedAttrs;
6948     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6949     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6950           return IRP.hasAttr(Attr.getKindAsEnum(),
6951                              /* IgnoreSubsumingPositions */ true);
6952         }))
6953       return ChangeStatus::UNCHANGED;
6954 
6955     // Clear existing attributes.
6956     IRP.removeAttrs(AttrKinds);
6957 
6958     // Use the generic manifest method.
6959     return IRAttribute::manifest(A);
6960   }
6961 
6962   /// See AbstractState::getAsStr().
6963   const std::string getAsStr() const override {
6964     if (isAssumedReadNone())
6965       return "readnone";
6966     if (isAssumedReadOnly())
6967       return "readonly";
6968     if (isAssumedWriteOnly())
6969       return "writeonly";
6970     return "may-read/write";
6971   }
6972 
6973   /// The set of IR attributes AAMemoryBehavior deals with.
6974   static const Attribute::AttrKind AttrKinds[3];
6975 };
6976 
6977 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6978     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6979 
6980 /// Memory behavior attribute for a floating value.
6981 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6982   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6983       : AAMemoryBehaviorImpl(IRP, A) {}
6984 
6985   /// See AbstractAttribute::updateImpl(...).
6986   ChangeStatus updateImpl(Attributor &A) override;
6987 
6988   /// See AbstractAttribute::trackStatistics()
6989   void trackStatistics() const override {
6990     if (isAssumedReadNone())
6991       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6992     else if (isAssumedReadOnly())
6993       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6994     else if (isAssumedWriteOnly())
6995       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6996   }
6997 
6998 private:
6999   /// Return true if users of \p UserI might access the underlying
7000   /// variable/location described by \p U and should therefore be analyzed.
7001   bool followUsersOfUseIn(Attributor &A, const Use &U,
7002                           const Instruction *UserI);
7003 
7004   /// Update the state according to the effect of use \p U in \p UserI.
7005   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7006 };
7007 
7008 /// Memory behavior attribute for function argument.
7009 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7010   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7011       : AAMemoryBehaviorFloating(IRP, A) {}
7012 
7013   /// See AbstractAttribute::initialize(...).
7014   void initialize(Attributor &A) override {
7015     intersectAssumedBits(BEST_STATE);
7016     const IRPosition &IRP = getIRPosition();
7017     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7018     // can query it when we use has/getAttr. That would allow us to reuse the
7019     // initialize of the base class here.
7020     bool HasByVal =
7021         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7022     getKnownStateFromValue(IRP, getState(),
7023                            /* IgnoreSubsumingPositions */ HasByVal);
7024 
7025     // Give up if there is no associated argument or the function is not IPO amendable.
7026     Argument *Arg = getAssociatedArgument();
7027     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7028       indicatePessimisticFixpoint();
7029   }
7030 
7031   ChangeStatus manifest(Attributor &A) override {
7032     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7033     if (!getAssociatedValue().getType()->isPointerTy())
7034       return ChangeStatus::UNCHANGED;
7035 
7036     // TODO: From readattrs.ll: "inalloca parameters are always
7037     //                           considered written"
7038     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7039       removeKnownBits(NO_WRITES);
7040       removeAssumedBits(NO_WRITES);
7041     }
7042     return AAMemoryBehaviorFloating::manifest(A);
7043   }
7044 
7045   /// See AbstractAttribute::trackStatistics()
7046   void trackStatistics() const override {
7047     if (isAssumedReadNone())
7048       STATS_DECLTRACK_ARG_ATTR(readnone)
7049     else if (isAssumedReadOnly())
7050       STATS_DECLTRACK_ARG_ATTR(readonly)
7051     else if (isAssumedWriteOnly())
7052       STATS_DECLTRACK_ARG_ATTR(writeonly)
7053   }
7054 };
7055 
7056 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7057   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7058       : AAMemoryBehaviorArgument(IRP, A) {}
7059 
7060   /// See AbstractAttribute::initialize(...).
7061   void initialize(Attributor &A) override {
7062     // If we don't have an associated argument this is either a variadic call
7063     // or an indirect call; either way, there is nothing to do here.
7064     Argument *Arg = getAssociatedArgument();
7065     if (!Arg) {
7066       indicatePessimisticFixpoint();
7067       return;
7068     }
7069     if (Arg->hasByValAttr()) {
7070       addKnownBits(NO_WRITES);
7071       removeKnownBits(NO_READS);
7072       removeAssumedBits(NO_READS);
7073     }
7074     AAMemoryBehaviorArgument::initialize(A);
7075     if (getAssociatedFunction()->isDeclaration())
7076       indicatePessimisticFixpoint();
7077   }
7078 
7079   /// See AbstractAttribute::updateImpl(...).
7080   ChangeStatus updateImpl(Attributor &A) override {
7081     // TODO: Once we have call site specific value information we can provide
7082     //       call site specific liveness information and then it makes
7083     //       sense to specialize attributes for call site arguments instead of
7084     //       redirecting requests to the callee argument.
7085     Argument *Arg = getAssociatedArgument();
7086     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7087     auto &ArgAA =
7088         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7089     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7090   }
7091 
7092   /// See AbstractAttribute::trackStatistics()
7093   void trackStatistics() const override {
7094     if (isAssumedReadNone())
7095       STATS_DECLTRACK_CSARG_ATTR(readnone)
7096     else if (isAssumedReadOnly())
7097       STATS_DECLTRACK_CSARG_ATTR(readonly)
7098     else if (isAssumedWriteOnly())
7099       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7100   }
7101 };
7102 
7103 /// Memory behavior attribute for a call site return position.
7104 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7105   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7106       : AAMemoryBehaviorFloating(IRP, A) {}
7107 
7108   /// See AbstractAttribute::initialize(...).
7109   void initialize(Attributor &A) override {
7110     AAMemoryBehaviorImpl::initialize(A);
7111     Function *F = getAssociatedFunction();
7112     if (!F || F->isDeclaration())
7113       indicatePessimisticFixpoint();
7114   }
7115 
7116   /// See AbstractAttribute::manifest(...).
7117   ChangeStatus manifest(Attributor &A) override {
7118     // We do not annotate returned values.
7119     return ChangeStatus::UNCHANGED;
7120   }
7121 
7122   /// See AbstractAttribute::trackStatistics()
7123   void trackStatistics() const override {}
7124 };
7125 
7126 /// An AA to represent the memory behavior function attributes.
7127 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7128   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7129       : AAMemoryBehaviorImpl(IRP, A) {}
7130 
7131   /// See AbstractAttribute::updateImpl(Attributor &A).
7132   virtual ChangeStatus updateImpl(Attributor &A) override;
7133 
7134   /// See AbstractAttribute::manifest(...).
7135   ChangeStatus manifest(Attributor &A) override {
7136     Function &F = cast<Function>(getAnchorValue());
7137     if (isAssumedReadNone()) {
7138       F.removeFnAttr(Attribute::ArgMemOnly);
7139       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7140       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7141     }
7142     return AAMemoryBehaviorImpl::manifest(A);
7143   }
7144 
7145   /// See AbstractAttribute::trackStatistics()
7146   void trackStatistics() const override {
7147     if (isAssumedReadNone())
7148       STATS_DECLTRACK_FN_ATTR(readnone)
7149     else if (isAssumedReadOnly())
7150       STATS_DECLTRACK_FN_ATTR(readonly)
7151     else if (isAssumedWriteOnly())
7152       STATS_DECLTRACK_FN_ATTR(writeonly)
7153   }
7154 };
7155 
7156 /// AAMemoryBehavior attribute for call sites.
7157 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7158   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7159       : AAMemoryBehaviorImpl(IRP, A) {}
7160 
7161   /// See AbstractAttribute::initialize(...).
7162   void initialize(Attributor &A) override {
7163     AAMemoryBehaviorImpl::initialize(A);
7164     Function *F = getAssociatedFunction();
7165     if (!F || F->isDeclaration())
7166       indicatePessimisticFixpoint();
7167   }
7168 
7169   /// See AbstractAttribute::updateImpl(...).
7170   ChangeStatus updateImpl(Attributor &A) override {
7171     // TODO: Once we have call site specific value information we can provide
7172     //       call site specific liveness information and then it makes
7173     //       sense to specialize attributes for call site arguments instead of
7174     //       redirecting requests to the callee argument.
7175     Function *F = getAssociatedFunction();
7176     const IRPosition &FnPos = IRPosition::function(*F);
7177     auto &FnAA =
7178         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7179     return clampStateAndIndicateChange(getState(), FnAA.getState());
7180   }
7181 
7182   /// See AbstractAttribute::trackStatistics()
7183   void trackStatistics() const override {
7184     if (isAssumedReadNone())
7185       STATS_DECLTRACK_CS_ATTR(readnone)
7186     else if (isAssumedReadOnly())
7187       STATS_DECLTRACK_CS_ATTR(readonly)
7188     else if (isAssumedWriteOnly())
7189       STATS_DECLTRACK_CS_ATTR(writeonly)
7190   }
7191 };
7192 
7193 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7194 
7195   // The current assumed state used to determine a change.
7196   auto AssumedState = getAssumed();
7197 
7198   auto CheckRWInst = [&](Instruction &I) {
7199     // If the instruction has its own memory behavior state, use it to restrict
7200     // the local state. No further analysis is required as the other memory
7201     // state is as optimistic as it gets.
7202     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7203       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7204           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7205       intersectAssumedBits(MemBehaviorAA.getAssumed());
7206       return !isAtFixpoint();
7207     }
7208 
7209     // Remove access kind modifiers if necessary.
7210     if (I.mayReadFromMemory())
7211       removeAssumedBits(NO_READS);
7212     if (I.mayWriteToMemory())
7213       removeAssumedBits(NO_WRITES);
7214     return !isAtFixpoint();
7215   };
7216 
7217   bool UsedAssumedInformation = false;
7218   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7219                                           UsedAssumedInformation))
7220     return indicatePessimisticFixpoint();
7221 
7222   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7223                                         : ChangeStatus::UNCHANGED;
7224 }
7225 
7226 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7227 
7228   const IRPosition &IRP = getIRPosition();
7229   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7230   AAMemoryBehavior::StateType &S = getState();
7231 
7232   // First, check the function scope. We take the known information and we avoid
7233   // work if the assumed information implies the current assumed information for
7234   // this attribute. This is valid for all but byval arguments.
7235   Argument *Arg = IRP.getAssociatedArgument();
7236   AAMemoryBehavior::base_t FnMemAssumedState =
7237       AAMemoryBehavior::StateType::getWorstState();
7238   if (!Arg || !Arg->hasByValAttr()) {
7239     const auto &FnMemAA =
7240         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7241     FnMemAssumedState = FnMemAA.getAssumed();
7242     S.addKnownBits(FnMemAA.getKnown());
7243     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7244       return ChangeStatus::UNCHANGED;
7245   }
7246 
7247   // The current assumed state used to determine a change.
7248   auto AssumedState = S.getAssumed();
7249 
7250   // Make sure the value is not captured (except through "return"); if it is,
7251   // any information derived would be irrelevant anyway as we cannot check the
7252   // potential aliases introduced by the capture. However, there is no need to
7253   // fall back to anything less optimistic than the function state.
7254   const auto &ArgNoCaptureAA =
7255       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7256   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7257     S.intersectAssumedBits(FnMemAssumedState);
7258     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7259                                           : ChangeStatus::UNCHANGED;
7260   }
7261 
7262   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7263   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7264     Instruction *UserI = cast<Instruction>(U.getUser());
7265     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7266                       << " \n");
7267 
7268     // Droppable users, e.g., llvm::assume, do not actually perform any action.
7269     if (UserI->isDroppable())
7270       return true;
7271 
7272     // Check if the users of UserI should also be visited.
7273     Follow = followUsersOfUseIn(A, U, UserI);
7274 
7275     // If UserI might touch memory we analyze the use in detail.
7276     if (UserI->mayReadOrWriteMemory())
7277       analyzeUseIn(A, U, UserI);
7278 
7279     return !isAtFixpoint();
7280   };
7281 
7282   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7283     return indicatePessimisticFixpoint();
7284 
7285   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7286                                         : ChangeStatus::UNCHANGED;
7287 }
7288 
7289 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7290                                                   const Instruction *UserI) {
7291   // The loaded value is unrelated to the pointer argument; there is no need
7292   // to follow the users of the load.
7293   if (isa<LoadInst>(UserI))
7294     return false;
7295 
7296   // By default we follow all uses assuming UserI might leak information on U;
7297   // we have special handling for call site operands though.
7298   const auto *CB = dyn_cast<CallBase>(UserI);
7299   if (!CB || !CB->isArgOperand(&U))
7300     return true;
7301 
7302   // If the use is a call argument known not to be captured, the users of
7303   // the call do not need to be visited because they have to be unrelated to
7304   // the input. Note that this check is not trivial even though we disallow
7305   // general capturing of the underlying argument. The reason is that the
7306   // call might capture the argument "through return", which we allow and for
7307   // which we need to check call users.
7308   if (U.get()->getType()->isPointerTy()) {
7309     unsigned ArgNo = CB->getArgOperandNo(&U);
7310     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7311         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7312     return !ArgNoCaptureAA.isAssumedNoCapture();
7313   }
7314 
7315   return true;
7316 }
7317 
7318 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7319                                             const Instruction *UserI) {
7320   assert(UserI->mayReadOrWriteMemory());
7321 
7322   switch (UserI->getOpcode()) {
7323   default:
7324     // TODO: Handle all atomics and other side-effect operations we know of.
7325     break;
7326   case Instruction::Load:
7327     // Loads cause the NO_READS property to disappear.
7328     removeAssumedBits(NO_READS);
7329     return;
7330 
7331   case Instruction::Store:
7332     // Stores cause the NO_WRITES property to disappear if the use is the
7333     // pointer operand. Note that while capturing was taken care of elsewhere,
7334     // we still need to handle stores of the value itself (not looked through).
7335     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7336       removeAssumedBits(NO_WRITES);
7337     else
7338       indicatePessimisticFixpoint();
7339     return;
7340 
7341   case Instruction::Call:
7342   case Instruction::CallBr:
7343   case Instruction::Invoke: {
7344     // For call sites we look at the argument memory behavior attribute (this
7345     // could be recursive!) in order to restrict our own state.
7346     const auto *CB = cast<CallBase>(UserI);
7347 
7348     // Give up on operand bundles.
7349     if (CB->isBundleOperand(&U)) {
7350       indicatePessimisticFixpoint();
7351       return;
7352     }
7353 
7354     // Calling a function does read the function pointer, and may write it if the
7355     // function is self-modifying.
7356     if (CB->isCallee(&U)) {
7357       removeAssumedBits(NO_READS);
7358       break;
7359     }
7360 
7361     // Adjust the possible access behavior based on the information on the
7362     // argument.
7363     IRPosition Pos;
7364     if (U.get()->getType()->isPointerTy())
7365       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7366     else
7367       Pos = IRPosition::callsite_function(*CB);
7368     const auto &MemBehaviorAA =
7369         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7370     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7371     // and at least "known".
7372     intersectAssumedBits(MemBehaviorAA.getAssumed());
7373     return;
7374   }
7375   }
7376 
7377   // Generally, look at the "may-properties" and adjust the assumed state if we
7378   // did not trigger special handling before.
7379   if (UserI->mayReadFromMemory())
7380     removeAssumedBits(NO_READS);
7381   if (UserI->mayWriteToMemory())
7382     removeAssumedBits(NO_WRITES);
7383 }
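
// To make the use analysis concrete: for `store i32 %x, i32* %p`, a use of
// %p (the pointer operand) merely clears NO_WRITES, while a use of %x (the
// stored value) forces a pessimistic fixpoint since the value escapes into
// memory. Loads clear only NO_READS, and call-site operands defer to the
// callee argument's AAMemoryBehavior as handled above.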
7384 } // namespace
7385 
7386 /// -------------------- Memory Locations Attributes ---------------------------
7387 /// Includes read-none, argmemonly, inaccessiblememonly,
7388 /// inaccessiblememorargmemonly
7389 /// ----------------------------------------------------------------------------
7390 
7391 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7392     AAMemoryLocation::MemoryLocationsKind MLK) {
7393   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7394     return "all memory";
7395   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7396     return "no memory";
7397   std::string S = "memory:";
7398   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7399     S += "stack,";
7400   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7401     S += "constant,";
7402   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7403     S += "internal global,";
7404   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7405     S += "external global,";
7406   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7407     S += "argument,";
7408   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7409     S += "inaccessible,";
7410   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7411     S += "malloced,";
7412   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7413     S += "unknown,";
7414   S.pop_back();
7415   return S;
7416 }
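
// For instance, a state with every NO_* bit set except NO_ARGUMENT_MEM
// prints as "memory:argument", a state with all bits set prints as
// "no memory", and a state with no bits set prints as "all memory".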
7417 
7418 namespace {
7419 struct AAMemoryLocationImpl : public AAMemoryLocation {
7420 
7421   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7422       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7423     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7424       AccessKind2Accesses[u] = nullptr;
7425   }
7426 
7427   ~AAMemoryLocationImpl() {
7428     // The AccessSets are allocated via a BumpPtrAllocator, we call
7429     // the destructor manually.
7430     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7431       if (AccessKind2Accesses[u])
7432         AccessKind2Accesses[u]->~AccessSet();
7433   }
7434 
7435   /// See AbstractAttribute::initialize(...).
7436   void initialize(Attributor &A) override {
7437     intersectAssumedBits(BEST_STATE);
7438     getKnownStateFromValue(A, getIRPosition(), getState());
7439     AAMemoryLocation::initialize(A);
7440   }
7441 
7442   /// Return the memory behavior information encoded in the IR for \p IRP.
7443   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7444                                      BitIntegerState &State,
7445                                      bool IgnoreSubsumingPositions = false) {
7446     // For internal functions we ignore `argmemonly` and
7447     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7448     // constant propagation. It is unclear if this is the best way but it is
7449     // unlikely this will cause real performance problems. If we are deriving
7450     // attributes for the anchor function we even remove the attribute in
7451     // addition to ignoring it.
7452     bool UseArgMemOnly = true;
7453     Function *AnchorFn = IRP.getAnchorScope();
7454     if (AnchorFn && A.isRunOn(*AnchorFn))
7455       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7456 
7457     SmallVector<Attribute, 2> Attrs;
7458     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7459     for (const Attribute &Attr : Attrs) {
7460       switch (Attr.getKindAsEnum()) {
7461       case Attribute::ReadNone:
7462         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7463         break;
7464       case Attribute::InaccessibleMemOnly:
7465         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7466         break;
7467       case Attribute::ArgMemOnly:
7468         if (UseArgMemOnly)
7469           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7470         else
7471           IRP.removeAttrs({Attribute::ArgMemOnly});
7472         break;
7473       case Attribute::InaccessibleMemOrArgMemOnly:
7474         if (UseArgMemOnly)
7475           State.addKnownBits(inverseLocation(
7476               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7477         else
7478           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7479         break;
7480       default:
7481         llvm_unreachable("Unexpected attribute!");
7482       }
7483     }
7484   }
7485 
7486   /// See AbstractAttribute::getDeducedAttributes(...).
7487   void getDeducedAttributes(LLVMContext &Ctx,
7488                             SmallVectorImpl<Attribute> &Attrs) const override {
7489     assert(Attrs.size() == 0);
7490     if (isAssumedReadNone()) {
7491       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7492     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7493       if (isAssumedInaccessibleMemOnly())
7494         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7495       else if (isAssumedArgMemOnly())
7496         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7497       else if (isAssumedInaccessibleOrArgMemOnly())
7498         Attrs.push_back(
7499             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7500     }
7501     assert(Attrs.size() <= 1);
7502   }
7503 
7504   /// See AbstractAttribute::manifest(...).
7505   ChangeStatus manifest(Attributor &A) override {
7506     const IRPosition &IRP = getIRPosition();
7507 
7508     // Check if we would improve the existing attributes first.
7509     SmallVector<Attribute, 4> DeducedAttrs;
7510     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7511     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7512           return IRP.hasAttr(Attr.getKindAsEnum(),
7513                              /* IgnoreSubsumingPositions */ true);
7514         }))
7515       return ChangeStatus::UNCHANGED;
7516 
7517     // Clear existing attributes.
7518     IRP.removeAttrs(AttrKinds);
7519     if (isAssumedReadNone())
7520       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7521 
7522     // Use the generic manifest method.
7523     return IRAttribute::manifest(A);
7524   }
7525 
7526   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7527   bool checkForAllAccessesToMemoryKind(
7528       function_ref<bool(const Instruction *, const Value *, AccessKind,
7529                         MemoryLocationsKind)>
7530           Pred,
7531       MemoryLocationsKind RequestedMLK) const override {
7532     if (!isValidState())
7533       return false;
7534 
7535     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7536     if (AssumedMLK == NO_LOCATIONS)
7537       return true;
7538 
7539     unsigned Idx = 0;
7540     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7541          CurMLK *= 2, ++Idx) {
7542       if (CurMLK & RequestedMLK)
7543         continue;
7544 
7545       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7546         for (const AccessInfo &AI : *Accesses)
7547           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7548             return false;
7549     }
7550 
7551     return true;
7552   }
7553 
7554   ChangeStatus indicatePessimisticFixpoint() override {
7555     // If we give up and indicate a pessimistic fixpoint this instruction will
7556     // become an access for all potential access kinds:
7557     // TODO: Add pointers for argmemonly and globals to improve the results of
7558     //       checkForAllAccessesToMemoryKind.
7559     bool Changed = false;
7560     MemoryLocationsKind KnownMLK = getKnown();
7561     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7562     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7563       if (!(CurMLK & KnownMLK))
7564         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7565                                   getAccessKindFromInst(I));
7566     return AAMemoryLocation::indicatePessimisticFixpoint();
7567   }
7568 
7569 protected:
7570   /// Helper struct to tie together an instruction that has a read or write
7571   /// effect with the pointer it accesses (if any).
7572   struct AccessInfo {
7573 
7574     /// The instruction that caused the access.
7575     const Instruction *I;
7576 
7577     /// The base pointer that is accessed, or null if unknown.
7578     const Value *Ptr;
7579 
7580     /// The kind of access (read/write/read+write).
7581     AccessKind Kind;
7582 
7583     bool operator==(const AccessInfo &RHS) const {
7584       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7585     }
7586     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7587       if (LHS.I != RHS.I)
7588         return LHS.I < RHS.I;
7589       if (LHS.Ptr != RHS.Ptr)
7590         return LHS.Ptr < RHS.Ptr;
7591       if (LHS.Kind != RHS.Kind)
7592         return LHS.Kind < RHS.Kind;
7593       return false;
7594     }
7595   };
7596 
7597   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM (encoded by
7598   /// the NO_LOCAL_MEM bit), to the accesses encountered for that memory kind.
7599   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7600   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7601 
7602   /// Categorize the pointer arguments of \p CB that might access memory in
7603   /// \p AccessedLocs and update the state and access map accordingly.
7604   void
7605   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7606                                      AAMemoryLocation::StateType &AccessedLocs,
7607                                      bool &Changed);
7608 
7609   /// Return the kind(s) of location that may be accessed by \p I.
7610   AAMemoryLocation::MemoryLocationsKind
7611   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7612 
7613   /// Return the access kind as determined by \p I.
7614   AccessKind getAccessKindFromInst(const Instruction *I) {
7615     AccessKind AK = READ_WRITE;
7616     if (I) {
7617       AK = I->mayReadFromMemory() ? READ : NONE;
7618       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7619     }
7620     return AK;
7621   }
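
  // E.g., a non-volatile load yields READ, a non-volatile store yields
  // WRITE, a call that may both read and write yields READ_WRITE, and a
  // null instruction conservatively yields READ_WRITE as well.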
7622 
7623   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7624   /// an access of kind \p AK to a \p MLK memory location with the access
7625   /// pointer \p Ptr.
7626   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7627                                  MemoryLocationsKind MLK, const Instruction *I,
7628                                  const Value *Ptr, bool &Changed,
7629                                  AccessKind AK = READ_WRITE) {
7630 
7631     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7632     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7633     if (!Accesses)
7634       Accesses = new (Allocator) AccessSet();
7635     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7636     State.removeAssumedBits(MLK);
7637   }
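
  // Indexing sketch: each MLK passed in is a single NO_* bit, so Log2_32
  // maps it to a dense array index. E.g., assuming (for illustration only)
  // NO_LOCAL_MEM == 1 << 3, accesses to stack memory would land in
  // AccessKind2Accesses[3].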
7638 
7639   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7640   /// arguments, and update the state and access map accordingly.
7641   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7642                           AAMemoryLocation::StateType &State, bool &Changed);
7643 
7644   /// Used to allocate access sets.
7645   BumpPtrAllocator &Allocator;
7646 
7647   /// The set of IR attributes AAMemoryLocation deals with.
7648   static const Attribute::AttrKind AttrKinds[4];
7649 };
7650 
7651 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7652     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7653     Attribute::InaccessibleMemOrArgMemOnly};
7654 
7655 void AAMemoryLocationImpl::categorizePtrValue(
7656     Attributor &A, const Instruction &I, const Value &Ptr,
7657     AAMemoryLocation::StateType &State, bool &Changed) {
7658   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7659                     << Ptr << " ["
7660                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7661 
7662   SmallVector<Value *, 8> Objects;
7663   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7664     LLVM_DEBUG(
7665         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7666     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7667                               getAccessKindFromInst(&I));
7668     return;
7669   }
7670 
7671   for (Value *Obj : Objects) {
7672     // TODO: recognize the TBAA used for constant accesses.
7673     MemoryLocationsKind MLK = NO_LOCATIONS;
7674     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7675     if (isa<UndefValue>(Obj))
7676       continue;
7677     if (isa<Argument>(Obj)) {
7678       // TODO: For now we do not treat byval arguments as local copies performed
7679       // on the call edge, though we should. To make that happen we need to
7680       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7681       // would also allow us to mark functions only accessing byval arguments as
7682       // readnone again; arguably, their accesses have no effect outside of the
7683       // function, like accesses to allocas.
7684       MLK = NO_ARGUMENT_MEM;
7685     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7686       // Reading constant memory is not treated as a read "effect" by the
7687       // function attr pass, so we don't either. Constants defined by TBAA are
7688       // similar. (We know we do not write it because it is constant.)
7689       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7690         if (GVar->isConstant())
7691           continue;
7692 
7693       if (GV->hasLocalLinkage())
7694         MLK = NO_GLOBAL_INTERNAL_MEM;
7695       else
7696         MLK = NO_GLOBAL_EXTERNAL_MEM;
7697     } else if (isa<ConstantPointerNull>(Obj) &&
7698                !NullPointerIsDefined(getAssociatedFunction(),
7699                                      Ptr.getType()->getPointerAddressSpace())) {
7700       continue;
7701     } else if (isa<AllocaInst>(Obj)) {
7702       MLK = NO_LOCAL_MEM;
7703     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7704       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7705           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7706       if (NoAliasAA.isAssumedNoAlias())
7707         MLK = NO_MALLOCED_MEM;
7708       else
7709         MLK = NO_UNKOWN_MEM;
7710     } else {
7711       MLK = NO_UNKOWN_MEM;
7712     }
7713 
7714     assert(MLK != NO_LOCATIONS && "No location specified!");
7715     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7716                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7717                       << "\n");
7718     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7719                               getAccessKindFromInst(&I));
7720   }
7721 
7722   LLVM_DEBUG(
7723       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7724              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7725 }
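
// By way of example, following the branches above: a pointer based on an
// alloca is categorized as NO_LOCAL_MEM, an internal global as
// NO_GLOBAL_INTERNAL_MEM, a noalias call result as NO_MALLOCED_MEM, and an
// unrecognized object conservatively as NO_UNKOWN_MEM, each recorded in the
// access map together with the instruction's access kind.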
7726 
7727 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7728     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7729     bool &Changed) {
7730   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7731 
7732     // Skip non-pointer arguments.
7733     const Value *ArgOp = CB.getArgOperand(ArgNo);
7734     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7735       continue;
7736 
7737     // Skip readnone arguments.
7738     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7739     const auto &ArgOpMemLocationAA =
7740         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7741 
7742     if (ArgOpMemLocationAA.isAssumedReadNone())
7743       continue;
7744 
7745     // Categorize potentially accessed pointer arguments as if there were an
7746     // access instruction with them as the pointer operand.
7747     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7748   }
7749 }
7750 
7751 AAMemoryLocation::MemoryLocationsKind
7752 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7753                                                   bool &Changed) {
7754   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7755                     << I << "\n");
7756 
7757   AAMemoryLocation::StateType AccessedLocs;
7758   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7759 
7760   if (auto *CB = dyn_cast<CallBase>(&I)) {
7761 
7762     // First check if we assume any memory access is visible.
7763     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7764         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7765     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7766                       << " [" << CBMemLocationAA << "]\n");
7767 
7768     if (CBMemLocationAA.isAssumedReadNone())
7769       return NO_LOCATIONS;
7770 
7771     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7772       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7773                                 Changed, getAccessKindFromInst(&I));
7774       return AccessedLocs.getAssumed();
7775     }
7776 
7777     uint32_t CBAssumedNotAccessedLocs =
7778         CBMemLocationAA.getAssumedNotAccessedLocation();
7779 
7780     // Set the argmemonly and global bits as we handle them separately below.
7781     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7782         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7783 
7784     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7785       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7786         continue;
7787       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7788                                 getAccessKindFromInst(&I));
7789     }
7790 
7791     // Now handle global memory if it might be accessed. This is slightly tricky
7792     // as NO_GLOBAL_MEM has multiple bits set.
7793     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7794     if (HasGlobalAccesses) {
7795       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7796                             AccessKind Kind, MemoryLocationsKind MLK) {
7797         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7798                                   getAccessKindFromInst(&I));
7799         return true;
7800       };
7801       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7802               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7803         return AccessedLocs.getWorstState();
7804     }
7805 
7806     LLVM_DEBUG(
7807         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7808                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7809 
7810     // Now handle argument memory if it might be accessed.
7811     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7812     if (HasArgAccesses)
7813       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7814 
7815     LLVM_DEBUG(
7816         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7817                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7818 
7819     return AccessedLocs.getAssumed();
7820   }
7821 
7822   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7823     LLVM_DEBUG(
7824         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7825                << I << " [" << *Ptr << "]\n");
7826     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7827     return AccessedLocs.getAssumed();
7828   }
7829 
7830   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7831                     << I << "\n");
7832   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7833                             getAccessKindFromInst(&I));
7834   return AccessedLocs.getAssumed();
7835 }
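
// Summarizing the call-site handling above: the callee's AAMemoryLocation
// first rules out entire location kinds, surviving global accesses are
// re-recorded against this call instruction, and surviving argument
// accesses are re-derived from the actual pointer operands via
// categorizePtrValue.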
7836 
7837 /// An AA to represent the memory behavior function attributes.
7838 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7839   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7840       : AAMemoryLocationImpl(IRP, A) {}
7841 
7842   /// See AbstractAttribute::updateImpl(Attributor &A).
7843   virtual ChangeStatus updateImpl(Attributor &A) override {
7844 
7845     const auto &MemBehaviorAA =
7846         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7847     if (MemBehaviorAA.isAssumedReadNone()) {
7848       if (MemBehaviorAA.isKnownReadNone())
7849         return indicateOptimisticFixpoint();
7850       assert(isAssumedReadNone() &&
7851              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7852       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7853       return ChangeStatus::UNCHANGED;
7854     }
7855 
7856     // The current assumed state used to determine a change.
7857     auto AssumedState = getAssumed();
7858     bool Changed = false;
7859 
7860     auto CheckRWInst = [&](Instruction &I) {
7861       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7862       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7863                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7864       removeAssumedBits(inverseLocation(MLK, false, false));
7865       // Stop once only the valid bit is set in the *not assumed location*, i.e.,
7866       // once we don't actually exclude any memory locations in the state.
7867       return getAssumedNotAccessedLocation() != VALID_STATE;
7868     };
7869 
7870     bool UsedAssumedInformation = false;
7871     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7872                                             UsedAssumedInformation))
7873       return indicatePessimisticFixpoint();
7874 
7875     Changed |= AssumedState != getAssumed();
7876     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7877   }
7878 
7879   /// See AbstractAttribute::trackStatistics()
7880   void trackStatistics() const override {
7881     if (isAssumedReadNone())
7882       STATS_DECLTRACK_FN_ATTR(readnone)
7883     else if (isAssumedArgMemOnly())
7884       STATS_DECLTRACK_FN_ATTR(argmemonly)
7885     else if (isAssumedInaccessibleMemOnly())
7886       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7887     else if (isAssumedInaccessibleOrArgMemOnly())
7888       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7889   }
7890 };
7891 
7892 /// AAMemoryLocation attribute for call sites.
7893 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7894   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7895       : AAMemoryLocationImpl(IRP, A) {}
7896 
7897   /// See AbstractAttribute::initialize(...).
7898   void initialize(Attributor &A) override {
7899     AAMemoryLocationImpl::initialize(A);
7900     Function *F = getAssociatedFunction();
7901     if (!F || F->isDeclaration())
7902       indicatePessimisticFixpoint();
7903   }
7904 
7905   /// See AbstractAttribute::updateImpl(...).
7906   ChangeStatus updateImpl(Attributor &A) override {
7907     // TODO: Once we have call site specific value information we can provide
7908     //       call site specific liveness information and then it makes
7909     //       sense to specialize attributes for call site arguments instead of
7910     //       redirecting requests to the callee argument.
7911     Function *F = getAssociatedFunction();
7912     const IRPosition &FnPos = IRPosition::function(*F);
7913     auto &FnAA =
7914         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7915     bool Changed = false;
7916     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7917                           AccessKind Kind, MemoryLocationsKind MLK) {
7918       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7919                                 getAccessKindFromInst(I));
7920       return true;
7921     };
7922     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7923       return indicatePessimisticFixpoint();
7924     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7925   }
7926 
7927   /// See AbstractAttribute::trackStatistics()
7928   void trackStatistics() const override {
7929     if (isAssumedReadNone())
7930       STATS_DECLTRACK_CS_ATTR(readnone)
7931   }
7932 };
7933 
7934 /// ------------------ Value Constant Range Attribute -------------------------
7935 
7936 struct AAValueConstantRangeImpl : AAValueConstantRange {
7937   using StateType = IntegerRangeState;
7938   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7939       : AAValueConstantRange(IRP, A) {}
7940 
7941   /// See AbstractAttribute::initialize(..).
7942   void initialize(Attributor &A) override {
7943     if (A.hasSimplificationCallback(getIRPosition())) {
7944       indicatePessimisticFixpoint();
7945       return;
7946     }
7947 
7948     // Intersect a range given by SCEV.
7949     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7950 
7951     // Intersect a range given by LVI.
7952     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7953   }
7954 
7955   /// See AbstractAttribute::getAsStr().
7956   const std::string getAsStr() const override {
7957     std::string Str;
7958     llvm::raw_string_ostream OS(Str);
7959     OS << "range(" << getBitWidth() << ")<";
7960     getKnown().print(OS);
7961     OS << " / ";
7962     getAssumed().print(OS);
7963     OS << ">";
7964     return OS.str();
7965   }
7966 
7967   /// Helper function to get a SCEV expr for the associated value at program
7968   /// point \p I.
7969   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7970     if (!getAnchorScope())
7971       return nullptr;
7972 
7973     ScalarEvolution *SE =
7974         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7975             *getAnchorScope());
7976 
7977     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7978         *getAnchorScope());
7979 
7980     if (!SE || !LI)
7981       return nullptr;
7982 
7983     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7984     if (!I)
7985       return S;
7986 
7987     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7988   }
7989 
7990   /// Helper function to get a range from SCEV for the associated value at
7991   /// program point \p I.
7992   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7993                                          const Instruction *I = nullptr) const {
7994     if (!getAnchorScope())
7995       return getWorstState(getBitWidth());
7996 
7997     ScalarEvolution *SE =
7998         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7999             *getAnchorScope());
8000 
8001     const SCEV *S = getSCEV(A, I);
8002     if (!SE || !S)
8003       return getWorstState(getBitWidth());
8004 
8005     return SE->getUnsignedRange(S);
8006   }
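  // Illustrative example for the helper above (assumed values, not from the
  // original source): for a canonical induction variable with SCEV
  // {0,+,1}<%loop> and a known trip count of 10, SE->getUnsignedRange(S)
  // returns the half-open range [0,10), which initialize() intersects into
  // the known state.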
8007 
8008   /// Helper function to get a range from LVI for the associated value at
8009   /// program point \p I.
8010   ConstantRange
8011   getConstantRangeFromLVI(Attributor &A,
8012                           const Instruction *CtxI = nullptr) const {
8013     if (!getAnchorScope())
8014       return getWorstState(getBitWidth());
8015 
8016     LazyValueInfo *LVI =
8017         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8018             *getAnchorScope());
8019 
8020     if (!LVI || !CtxI)
8021       return getWorstState(getBitWidth());
8022     return LVI->getConstantRange(&getAssociatedValue(),
8023                                  const_cast<Instruction *>(CtxI));
8024   }
8025 
8026   /// Return true if \p CtxI is valid for querying outside analyses.
8027   /// This basically makes sure we do not ask intra-procedural analyses
8028   /// about a context in the wrong function or a context that violates
8029   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8030   /// if the original context of this AA is OK or should be considered invalid.
8031   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8032                                                const Instruction *CtxI,
8033                                                bool AllowAACtxI) const {
8034     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8035       return false;
8036 
8037     // Our context might be in a different function; no intra-procedural
8038     // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
8039     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8040       return false;
8041 
8042     // If the context is not dominated by the value there are paths to the
8043     // context that do not define the value. This cannot be handled by
8044     // LazyValueInfo so we need to bail.
8045     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8046       InformationCache &InfoCache = A.getInfoCache();
8047       const DominatorTree *DT =
8048           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8049               *I->getFunction());
8050       return DT && DT->dominates(I, CtxI);
8051     }
8052 
8053     return true;
8054   }
8055 
8056   /// See AAValueConstantRange::getKnownConstantRange(..).
8057   ConstantRange
8058   getKnownConstantRange(Attributor &A,
8059                         const Instruction *CtxI = nullptr) const override {
8060     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8061                                                  /* AllowAACtxI */ false))
8062       return getKnown();
8063 
8064     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8065     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8066     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8067   }
8068 
8069   /// See AAValueConstantRange::getAssumedConstantRange(..).
8070   ConstantRange
8071   getAssumedConstantRange(Attributor &A,
8072                           const Instruction *CtxI = nullptr) const override {
8073     // TODO: Make SCEV use Attributor assumption.
8074     //       We may be able to bound a variable range via assumptions in
8075     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8076     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8077     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8078                                                  /* AllowAACtxI */ false))
8079       return getAssumed();
8080 
8081     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8082     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8083     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8084   }
8085 
8086   /// Helper function to create MDNode for range metadata.
8087   static MDNode *
8088   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8089                             const ConstantRange &AssumedConstantRange) {
8090     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8091                                   Ty, AssumedConstantRange.getLower())),
8092                               ConstantAsMetadata::get(ConstantInt::get(
8093                                   Ty, AssumedConstantRange.getUpper()))};
8094     return MDNode::get(Ctx, LowAndHigh);
8095   }
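  // Illustrative manifestation of the helper above (assumed IR, not from the
  // original source): an assumed range [0,10) for an i32 value becomes a
  // half-open [Lo, Hi) metadata pair:
  //   %x = load i32, i32* %p, !range !0
  //   !0 = !{i32 0, i32 10}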
8096 
8097   /// Return true if \p Assumed is included in \p KnownRanges.
8098   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8099 
8100     if (Assumed.isFullSet())
8101       return false;
8102 
8103     if (!KnownRanges)
8104       return true;
8105 
8106     // If multiple ranges are annotated in the IR, we give up on annotating the
8107     // assumed range for now.
8108 
8109     // TODO: If there exists a known range which contains the assumed range, we
8110     // can say the assumed range is better.
8111     if (KnownRanges->getNumOperands() > 2)
8112       return false;
8113 
8114     ConstantInt *Lower =
8115         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8116     ConstantInt *Upper =
8117         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8118 
8119     ConstantRange Known(Lower->getValue(), Upper->getValue());
8120     return Known.contains(Assumed) && Known != Assumed;
8121   }
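  // E.g., given existing metadata !{i32 0, i32 10} (Known = [0,10)), an
  // assumed range [2,5) is strictly contained and therefore "better"; a
  // full-set or identical assumed range is not. (Illustrative values, not
  // from the original source.)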
8122 
8123   /// Helper function to set range metadata.
8124   static bool
8125   setRangeMetadataIfisBetterRange(Instruction *I,
8126                                   const ConstantRange &AssumedConstantRange) {
8127     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8128     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8129       if (!AssumedConstantRange.isEmptySet()) {
8130         I->setMetadata(LLVMContext::MD_range,
8131                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8132                                                  AssumedConstantRange));
8133         return true;
8134       }
8135     }
8136     return false;
8137   }
8138 
8139   /// See AbstractAttribute::manifest()
8140   ChangeStatus manifest(Attributor &A) override {
8141     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8142     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8143     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8144 
8145     auto &V = getAssociatedValue();
8146     if (!AssumedConstantRange.isEmptySet() &&
8147         !AssumedConstantRange.isSingleElement()) {
8148       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8149         assert(I == getCtxI() && "Should not annotate an instruction which is "
8150                                  "not the context instruction");
8151         if (isa<CallInst>(I) || isa<LoadInst>(I))
8152           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8153             Changed = ChangeStatus::CHANGED;
8154       }
8155     }
8156 
8157     return Changed;
8158   }
8159 };
8160 
8161 struct AAValueConstantRangeArgument final
8162     : AAArgumentFromCallSiteArguments<
8163           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8164           true /* BridgeCallBaseContext */> {
8165   using Base = AAArgumentFromCallSiteArguments<
8166       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8167       true /* BridgeCallBaseContext */>;
8168   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8169       : Base(IRP, A) {}
8170 
8171   /// See AbstractAttribute::initialize(..).
8172   void initialize(Attributor &A) override {
8173     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8174       indicatePessimisticFixpoint();
8175     } else {
8176       Base::initialize(A);
8177     }
8178   }
8179 
8180   /// See AbstractAttribute::trackStatistics()
8181   void trackStatistics() const override {
8182     STATS_DECLTRACK_ARG_ATTR(value_range)
8183   }
8184 };
8185 
8186 struct AAValueConstantRangeReturned
8187     : AAReturnedFromReturnedValues<AAValueConstantRange,
8188                                    AAValueConstantRangeImpl,
8189                                    AAValueConstantRangeImpl::StateType,
8190                                    /* PropagateCallBaseContext */ true> {
8191   using Base =
8192       AAReturnedFromReturnedValues<AAValueConstantRange,
8193                                    AAValueConstantRangeImpl,
8194                                    AAValueConstantRangeImpl::StateType,
8195                                    /* PropagateCallBaseContext */ true>;
8196   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8197       : Base(IRP, A) {}
8198 
8199   /// See AbstractAttribute::initialize(...).
8200   void initialize(Attributor &A) override {}
8201 
8202   /// See AbstractAttribute::trackStatistics()
8203   void trackStatistics() const override {
8204     STATS_DECLTRACK_FNRET_ATTR(value_range)
8205   }
8206 };
8207 
8208 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8209   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8210       : AAValueConstantRangeImpl(IRP, A) {}
8211 
8212   /// See AbstractAttribute::initialize(...).
8213   void initialize(Attributor &A) override {
8214     AAValueConstantRangeImpl::initialize(A);
8215     if (isAtFixpoint())
8216       return;
8217 
8218     Value &V = getAssociatedValue();
8219 
8220     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8221       unionAssumed(ConstantRange(C->getValue()));
8222       indicateOptimisticFixpoint();
8223       return;
8224     }
8225 
8226     if (isa<UndefValue>(&V)) {
8227       // Collapse the undef state to 0.
8228       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8229       indicateOptimisticFixpoint();
8230       return;
8231     }
8232 
8233     if (isa<CallBase>(&V))
8234       return;
8235 
8236     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8237       return;
8238 
8239     // If it is a load instruction with range metadata, use it.
8240     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8241       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8242         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8243         return;
8244       }
8245 
8246     // We can work with PHI and select instructions as we traverse their
8247     // operands during update.
8248     if (isa<SelectInst>(V) || isa<PHINode>(V))
8249       return;
8250 
8251     // Otherwise we give up.
8252     indicatePessimisticFixpoint();
8253 
8254     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8255                       << getAssociatedValue() << "\n");
8256   }
8257 
8258   bool calculateBinaryOperator(
8259       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8260       const Instruction *CtxI,
8261       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8262     Value *LHS = BinOp->getOperand(0);
8263     Value *RHS = BinOp->getOperand(1);
8264 
8265     // Simplify the operands first.
8266     bool UsedAssumedInformation = false;
8267     const auto &SimplifiedLHS =
8268         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8269                                *this, UsedAssumedInformation);
8270     if (!SimplifiedLHS.hasValue())
8271       return true;
8272     if (!SimplifiedLHS.getValue())
8273       return false;
8274     LHS = *SimplifiedLHS;
8275 
8276     const auto &SimplifiedRHS =
8277         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8278                                *this, UsedAssumedInformation);
8279     if (!SimplifiedRHS.hasValue())
8280       return true;
8281     if (!SimplifiedRHS.getValue())
8282       return false;
8283     RHS = *SimplifiedRHS;
8284 
8285     // TODO: Allow non integers as well.
8286     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8287       return false;
8288 
8289     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8290         *this, IRPosition::value(*LHS, getCallBaseContext()),
8291         DepClassTy::REQUIRED);
8292     QuerriedAAs.push_back(&LHSAA);
8293     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8294 
8295     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8296         *this, IRPosition::value(*RHS, getCallBaseContext()),
8297         DepClassTy::REQUIRED);
8298     QuerriedAAs.push_back(&RHSAA);
8299     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8300 
8301     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8302 
8303     T.unionAssumed(AssumedRange);
8304 
8305     // TODO: Track a known state too.
8306 
8307     return T.isValidState();
8308   }
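  // Illustrative example for the helper above (assumed ranges, not from the
  // original source): for an 'add' with LHS in [0,4) and RHS in [0,8),
  // ConstantRange::binaryOp yields [0,11) (0 + 0 up to 3 + 7), which is
  // unioned into T.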
8309 
8310   bool calculateCastInst(
8311       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8312       const Instruction *CtxI,
8313       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8314     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8315     // TODO: Allow non integers as well.
8316     Value *OpV = CastI->getOperand(0);
8317 
8318     // Simplify the operand first.
8319     bool UsedAssumedInformation = false;
8320     const auto &SimplifiedOpV =
8321         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8322                                *this, UsedAssumedInformation);
8323     if (!SimplifiedOpV.hasValue())
8324       return true;
8325     if (!SimplifiedOpV.getValue())
8326       return false;
8327     OpV = *SimplifiedOpV;
8328 
8329     if (!OpV->getType()->isIntegerTy())
8330       return false;
8331 
8332     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8333         *this, IRPosition::value(*OpV, getCallBaseContext()),
8334         DepClassTy::REQUIRED);
8335     QuerriedAAs.push_back(&OpAA);
8336     T.unionAssumed(
8337         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8338     return T.isValidState();
8339   }
8340 
8341   bool
8342   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8343                    const Instruction *CtxI,
8344                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8345     Value *LHS = CmpI->getOperand(0);
8346     Value *RHS = CmpI->getOperand(1);
8347 
8348     // Simplify the operands first.
8349     bool UsedAssumedInformation = false;
8350     const auto &SimplifiedLHS =
8351         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8352                                *this, UsedAssumedInformation);
8353     if (!SimplifiedLHS.hasValue())
8354       return true;
8355     if (!SimplifiedLHS.getValue())
8356       return false;
8357     LHS = *SimplifiedLHS;
8358 
8359     const auto &SimplifiedRHS =
8360         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8361                                *this, UsedAssumedInformation);
8362     if (!SimplifiedRHS.hasValue())
8363       return true;
8364     if (!SimplifiedRHS.getValue())
8365       return false;
8366     RHS = *SimplifiedRHS;
8367 
8368     // TODO: Allow non integers as well.
8369     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8370       return false;
8371 
8372     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8373         *this, IRPosition::value(*LHS, getCallBaseContext()),
8374         DepClassTy::REQUIRED);
8375     QuerriedAAs.push_back(&LHSAA);
8376     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8377         *this, IRPosition::value(*RHS, getCallBaseContext()),
8378         DepClassTy::REQUIRED);
8379     QuerriedAAs.push_back(&RHSAA);
8380     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8381     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8382 
8383     // If one of them is empty set, we can't decide.
8384     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8385       return true;
8386 
8387     bool MustTrue = false, MustFalse = false;
8388 
8389     auto AllowedRegion =
8390         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8391 
8392     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8393       MustFalse = true;
8394 
8395     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8396       MustTrue = true;
8397 
8398     assert((!MustTrue || !MustFalse) &&
8399            "Either MustTrue or MustFalse should be false!");
8400 
8401     if (MustTrue)
8402       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8403     else if (MustFalse)
8404       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8405     else
8406       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8407 
8408     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8409                       << " " << RHSAA << "\n");
8410 
8411     // TODO: Track a known state too.
8412     return T.isValidState();
8413   }
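  // Illustrative example for the helper above (assumed ranges, not from the
  // original source): for 'icmp ult %a, %b' with %a in [0,4) and %b in
  // [10,20), every LHS value is smaller than every RHS value, so MustTrue
  // holds and the assumed i1 range collapses to {1}. With %a in [0,30)
  // instead, neither MustTrue nor MustFalse holds and the full i1 set is
  // assumed.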
8414 
8415   /// See AbstractAttribute::updateImpl(...).
8416   ChangeStatus updateImpl(Attributor &A) override {
8417     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8418                             IntegerRangeState &T, bool Stripped) -> bool {
8419       Instruction *I = dyn_cast<Instruction>(&V);
8420       if (!I || isa<CallBase>(I)) {
8421 
8422         // Simplify the operand first.
8423         bool UsedAssumedInformation = false;
8424         const auto &SimplifiedOpV =
8425             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8426                                    *this, UsedAssumedInformation);
8427         if (!SimplifiedOpV.hasValue())
8428           return true;
8429         if (!SimplifiedOpV.getValue())
8430           return false;
8431         Value *VPtr = *SimplifiedOpV;
8432 
8433         // If the value is not an instruction, we query the Attributor for the AA.
8434         const auto &AA = A.getAAFor<AAValueConstantRange>(
8435             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8436             DepClassTy::REQUIRED);
8437 
8438         // The clamp operator is not used so that the program point CtxI can be
8439         // utilized.
8439         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8440 
8441         return T.isValidState();
8442       }
8443 
8444       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8445       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8446         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8447           return false;
8448       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8449         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8450           return false;
8451       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8452         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8453           return false;
8454       } else {
8455         // Give up with other instructions.
8456         // TODO: Add other instructions
8457 
8458         T.indicatePessimisticFixpoint();
8459         return false;
8460       }
8461 
8462       // Catch circular reasoning in a pessimistic way for now.
8463       // TODO: Check how the range evolves and if we stripped anything, see also
8464       //       AADereferenceable or AAAlign for similar situations.
8465       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8466         if (QueriedAA != this)
8467           continue;
8468         // If we are in a steady state we do not need to worry.
8469         if (T.getAssumed() == getState().getAssumed())
8470           continue;
8471         T.indicatePessimisticFixpoint();
8472       }
8473 
8474       return T.isValidState();
8475     };
8476 
8477     IntegerRangeState T(getBitWidth());
8478 
8479     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8480                                                   VisitValueCB, getCtxI(),
8481                                                   /* UseValueSimplify */ false))
8482       return indicatePessimisticFixpoint();
8483 
8484     return clampStateAndIndicateChange(getState(), T);
8485   }
8486 
8487   /// See AbstractAttribute::trackStatistics()
8488   void trackStatistics() const override {
8489     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8490   }
8491 };
8492 
8493 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8494   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8495       : AAValueConstantRangeImpl(IRP, A) {}
8496 
8497   /// See AbstractAttribute::updateImpl(...).
8498   ChangeStatus updateImpl(Attributor &A) override {
8499     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8500                      "not be called");
8501   }
8502 
8503   /// See AbstractAttribute::trackStatistics()
8504   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8505 };
8506 
8507 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8508   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8509       : AAValueConstantRangeFunction(IRP, A) {}
8510 
8511   /// See AbstractAttribute::trackStatistics()
8512   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8513 };
8514 
8515 struct AAValueConstantRangeCallSiteReturned
8516     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8517                                      AAValueConstantRangeImpl,
8518                                      AAValueConstantRangeImpl::StateType,
8519                                      /* IntroduceCallBaseContext */ true> {
8520   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8521       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8522                                        AAValueConstantRangeImpl,
8523                                        AAValueConstantRangeImpl::StateType,
8524                                        /* IntroduceCallBaseContext */ true>(IRP,
8525                                                                             A) {
8526   }
8527 
8528   /// See AbstractAttribute::initialize(...).
8529   void initialize(Attributor &A) override {
8530     // If it is a call instruction with range metadata, use the metadata.
8531     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8532       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8533         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8534 
8535     AAValueConstantRangeImpl::initialize(A);
8536   }
8537 
8538   /// See AbstractAttribute::trackStatistics()
8539   void trackStatistics() const override {
8540     STATS_DECLTRACK_CSRET_ATTR(value_range)
8541   }
8542 };
8543 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8544   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8545       : AAValueConstantRangeFloating(IRP, A) {}
8546 
8547   /// See AbstractAttribute::manifest()
8548   ChangeStatus manifest(Attributor &A) override {
8549     return ChangeStatus::UNCHANGED;
8550   }
8551 
8552   /// See AbstractAttribute::trackStatistics()
8553   void trackStatistics() const override {
8554     STATS_DECLTRACK_CSARG_ATTR(value_range)
8555   }
8556 };
8557 
8558 /// ------------------ Potential Values Attribute -------------------------
8559 
8560 struct AAPotentialValuesImpl : AAPotentialValues {
8561   using StateType = PotentialConstantIntValuesState;
8562 
8563   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8564       : AAPotentialValues(IRP, A) {}
8565 
8566   /// See AbstractAttribute::initialize(..).
8567   void initialize(Attributor &A) override {
8568     if (A.hasSimplificationCallback(getIRPosition()))
8569       indicatePessimisticFixpoint();
8570     else
8571       AAPotentialValues::initialize(A);
8572   }
8573 
8574   /// See AbstractAttribute::getAsStr().
8575   const std::string getAsStr() const override {
8576     std::string Str;
8577     llvm::raw_string_ostream OS(Str);
8578     OS << getState();
8579     return OS.str();
8580   }
8581 
8582   /// See AbstractAttribute::updateImpl(...).
8583   ChangeStatus updateImpl(Attributor &A) override {
8584     return indicatePessimisticFixpoint();
8585   }
8586 };
8587 
8588 struct AAPotentialValuesArgument final
8589     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8590                                       PotentialConstantIntValuesState> {
8591   using Base =
8592       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8593                                       PotentialConstantIntValuesState>;
8594   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8595       : Base(IRP, A) {}
8596 
8597   /// See AbstractAttribute::initialize(..).
8598   void initialize(Attributor &A) override {
8599     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8600       indicatePessimisticFixpoint();
8601     } else {
8602       Base::initialize(A);
8603     }
8604   }
8605 
8606   /// See AbstractAttribute::trackStatistics()
8607   void trackStatistics() const override {
8608     STATS_DECLTRACK_ARG_ATTR(potential_values)
8609   }
8610 };
8611 
8612 struct AAPotentialValuesReturned
8613     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8614   using Base =
8615       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8616   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8617       : Base(IRP, A) {}
8618 
8619   /// See AbstractAttribute::trackStatistics()
8620   void trackStatistics() const override {
8621     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8622   }
8623 };
8624 
8625 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8626   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8627       : AAPotentialValuesImpl(IRP, A) {}
8628 
8629   /// See AbstractAttribute::initialize(..).
8630   void initialize(Attributor &A) override {
8631     AAPotentialValuesImpl::initialize(A);
8632     if (isAtFixpoint())
8633       return;
8634 
8635     Value &V = getAssociatedValue();
8636 
8637     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8638       unionAssumed(C->getValue());
8639       indicateOptimisticFixpoint();
8640       return;
8641     }
8642 
8643     if (isa<UndefValue>(&V)) {
8644       unionAssumedWithUndef();
8645       indicateOptimisticFixpoint();
8646       return;
8647     }
8648 
8649     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8650       return;
8651 
8652     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8653       return;
8654 
8655     indicatePessimisticFixpoint();
8656 
8657     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8658                       << getAssociatedValue() << "\n");
8659   }
8660 
8661   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8662                                 const APInt &RHS) {
8663     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8664   }
8665 
8666   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8667                                  uint32_t ResultBitWidth) {
8668     Instruction::CastOps CastOp = CI->getOpcode();
8669     switch (CastOp) {
8670     default:
8671       llvm_unreachable("unsupported or not integer cast");
8672     case Instruction::Trunc:
8673       return Src.trunc(ResultBitWidth);
8674     case Instruction::SExt:
8675       return Src.sext(ResultBitWidth);
8676     case Instruction::ZExt:
8677       return Src.zext(ResultBitWidth);
8678     case Instruction::BitCast:
8679       return Src;
8680     }
8681   }
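  // E.g., 'trunc i32 258 to i8' maps the potential value 258 to 2
  // (258 mod 256), while sext/zext widen each value accordingly.
  // (Illustrative values, not from the original source.)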
8682 
8683   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8684                                        const APInt &LHS, const APInt &RHS,
8685                                        bool &SkipOperation, bool &Unsupported) {
8686     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8687     // Unsupported is set to true when the binary operator is not supported.
8688     // SkipOperation is set to true when UB occurs with the given operand pair
8689     // (LHS, RHS).
8690     // TODO: we should look at the nsw and nuw flags to handle operations
8691     //       that create poison or undef values.
8692     switch (BinOpcode) {
8693     default:
8694       Unsupported = true;
8695       return LHS;
8696     case Instruction::Add:
8697       return LHS + RHS;
8698     case Instruction::Sub:
8699       return LHS - RHS;
8700     case Instruction::Mul:
8701       return LHS * RHS;
8702     case Instruction::UDiv:
8703       if (RHS.isZero()) {
8704         SkipOperation = true;
8705         return LHS;
8706       }
8707       return LHS.udiv(RHS);
8708     case Instruction::SDiv:
8709       if (RHS.isZero()) {
8710         SkipOperation = true;
8711         return LHS;
8712       }
8713       return LHS.sdiv(RHS);
8714     case Instruction::URem:
8715       if (RHS.isZero()) {
8716         SkipOperation = true;
8717         return LHS;
8718       }
8719       return LHS.urem(RHS);
8720     case Instruction::SRem:
8721       if (RHS.isZero()) {
8722         SkipOperation = true;
8723         return LHS;
8724       }
8725       return LHS.srem(RHS);
8726     case Instruction::Shl:
8727       return LHS.shl(RHS);
8728     case Instruction::LShr:
8729       return LHS.lshr(RHS);
8730     case Instruction::AShr:
8731       return LHS.ashr(RHS);
8732     case Instruction::And:
8733       return LHS & RHS;
8734     case Instruction::Or:
8735       return LHS | RHS;
8736     case Instruction::Xor:
8737       return LHS ^ RHS;
8738     }
8739   }
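  // E.g., for 'udiv' the operand pair (LHS = 8, RHS = 2) contributes 4, while
  // any pair with RHS = 0 sets SkipOperation so the UB-producing combination
  // is simply dropped. (Illustrative values, not from the original source.)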
8740 
8741   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8742                                            const APInt &LHS, const APInt &RHS) {
8743     bool SkipOperation = false;
8744     bool Unsupported = false;
8745     APInt Result =
8746         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8747     if (Unsupported)
8748       return false;
8749     // If SkipOperation is true, we can ignore this operand pair (L, R).
8750     if (!SkipOperation)
8751       unionAssumed(Result);
8752     return isValidState();
8753   }
8754 
8755   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8756     auto AssumedBefore = getAssumed();
8757     Value *LHS = ICI->getOperand(0);
8758     Value *RHS = ICI->getOperand(1);
8759 
8760     // Simplify the operands first.
8761     bool UsedAssumedInformation = false;
8762     const auto &SimplifiedLHS =
8763         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8764                                *this, UsedAssumedInformation);
8765     if (!SimplifiedLHS.hasValue())
8766       return ChangeStatus::UNCHANGED;
8767     if (!SimplifiedLHS.getValue())
8768       return indicatePessimisticFixpoint();
8769     LHS = *SimplifiedLHS;
8770 
8771     const auto &SimplifiedRHS =
8772         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8773                                *this, UsedAssumedInformation);
8774     if (!SimplifiedRHS.hasValue())
8775       return ChangeStatus::UNCHANGED;
8776     if (!SimplifiedRHS.getValue())
8777       return indicatePessimisticFixpoint();
8778     RHS = *SimplifiedRHS;
8779 
8780     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8781       return indicatePessimisticFixpoint();
8782 
8783     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8784                                                 DepClassTy::REQUIRED);
8785     if (!LHSAA.isValidState())
8786       return indicatePessimisticFixpoint();
8787 
8788     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8789                                                 DepClassTy::REQUIRED);
8790     if (!RHSAA.isValidState())
8791       return indicatePessimisticFixpoint();
8792 
8793     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8794     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8795 
8796     // TODO: make use of undef flag to limit potential values aggressively.
8797     bool MaybeTrue = false, MaybeFalse = false;
8798     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8799     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8800       // The result of any comparison between undefs can be soundly replaced
8801       // with undef.
8802       unionAssumedWithUndef();
8803     } else if (LHSAA.undefIsContained()) {
8804       for (const APInt &R : RHSAAPVS) {
8805         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8806         MaybeTrue |= CmpResult;
8807         MaybeFalse |= !CmpResult;
8808         if (MaybeTrue & MaybeFalse)
8809           return indicatePessimisticFixpoint();
8810       }
8811     } else if (RHSAA.undefIsContained()) {
8812       for (const APInt &L : LHSAAPVS) {
8813         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8814         MaybeTrue |= CmpResult;
8815         MaybeFalse |= !CmpResult;
8816         if (MaybeTrue & MaybeFalse)
8817           return indicatePessimisticFixpoint();
8818       }
8819     } else {
8820       for (const APInt &L : LHSAAPVS) {
8821         for (const APInt &R : RHSAAPVS) {
8822           bool CmpResult = calculateICmpInst(ICI, L, R);
8823           MaybeTrue |= CmpResult;
8824           MaybeFalse |= !CmpResult;
8825           if (MaybeTrue & MaybeFalse)
8826             return indicatePessimisticFixpoint();
8827         }
8828       }
8829     }
8830     if (MaybeTrue)
8831       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8832     if (MaybeFalse)
8833       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8834     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8835                                          : ChangeStatus::CHANGED;
8836   }
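  // Illustrative example for the function above (assumed sets, not from the
  // original source): with LHS potential values {0, 1} and RHS {1}, 'icmp eq'
  // yields a true outcome (1 == 1) and a false one (0 == 1), so MaybeTrue and
  // MaybeFalse both hold and we fall back to the pessimistic fixpoint.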
8837 
8838   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8839     auto AssumedBefore = getAssumed();
8840     Value *LHS = SI->getTrueValue();
8841     Value *RHS = SI->getFalseValue();
8842 
8843     // Simplify the operands first.
8844     bool UsedAssumedInformation = false;
8845     const auto &SimplifiedLHS =
8846         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8847                                *this, UsedAssumedInformation);
8848     if (!SimplifiedLHS.hasValue())
8849       return ChangeStatus::UNCHANGED;
8850     if (!SimplifiedLHS.getValue())
8851       return indicatePessimisticFixpoint();
8852     LHS = *SimplifiedLHS;
8853 
8854     const auto &SimplifiedRHS =
8855         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8856                                *this, UsedAssumedInformation);
8857     if (!SimplifiedRHS.hasValue())
8858       return ChangeStatus::UNCHANGED;
8859     if (!SimplifiedRHS.getValue())
8860       return indicatePessimisticFixpoint();
8861     RHS = *SimplifiedRHS;
8862 
8863     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8864       return indicatePessimisticFixpoint();
8865 
8866     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8867                                                   UsedAssumedInformation);
8868 
8869     // Check if we only need one operand.
8870     bool OnlyLeft = false, OnlyRight = false;
8871     if (C.hasValue() && *C && (*C)->isOneValue())
8872       OnlyLeft = true;
8873     else if (C.hasValue() && *C && (*C)->isZeroValue())
8874       OnlyRight = true;
8875 
8876     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8877     if (!OnlyRight) {
8878       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8879                                              DepClassTy::REQUIRED);
8880       if (!LHSAA->isValidState())
8881         return indicatePessimisticFixpoint();
8882     }
8883     if (!OnlyLeft) {
8884       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8885                                              DepClassTy::REQUIRED);
8886       if (!RHSAA->isValidState())
8887         return indicatePessimisticFixpoint();
8888     }
8889 
8890     if (!LHSAA || !RHSAA) {
8891       // select (true/false), lhs, rhs
8892       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8893 
8894       if (OpAA->undefIsContained())
8895         unionAssumedWithUndef();
8896       else
8897         unionAssumed(*OpAA);
8898 
8899     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8900       // select i1 *, undef, undef => undef
8901       unionAssumedWithUndef();
8902     } else {
8903       unionAssumed(*LHSAA);
8904       unionAssumed(*RHSAA);
8905     }
8906     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8907                                          : ChangeStatus::CHANGED;
8908   }
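  // Illustrative example for the function above (assumed values, not from the
  // original source): for 'select i1 %c, i32 %x, i32 %y' with an unknown
  // condition, %x in {1, 2} and %y in {4}, the assumed set becomes {1, 2, 4};
  // if %c simplifies to true, only {1, 2} is taken.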
8909 
8910   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8911     auto AssumedBefore = getAssumed();
8912     if (!CI->isIntegerCast())
8913       return indicatePessimisticFixpoint();
8914     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8915     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8916     Value *Src = CI->getOperand(0);
8917 
8918     // Simplify the operand first.
8919     bool UsedAssumedInformation = false;
8920     const auto &SimplifiedSrc =
8921         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8922                                *this, UsedAssumedInformation);
8923     if (!SimplifiedSrc.hasValue())
8924       return ChangeStatus::UNCHANGED;
8925     if (!SimplifiedSrc.getValue())
8926       return indicatePessimisticFixpoint();
8927     Src = *SimplifiedSrc;
8928 
8929     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8930                                                 DepClassTy::REQUIRED);
8931     if (!SrcAA.isValidState())
8932       return indicatePessimisticFixpoint();
8933     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8934     if (SrcAA.undefIsContained())
8935       unionAssumedWithUndef();
8936     else {
8937       for (const APInt &S : SrcAAPVS) {
8938         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8939         unionAssumed(T);
8940       }
8941     }
8942     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8943                                          : ChangeStatus::CHANGED;
8944   }
8945 
8946   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8947     auto AssumedBefore = getAssumed();
8948     Value *LHS = BinOp->getOperand(0);
8949     Value *RHS = BinOp->getOperand(1);
8950 
8951     // Simplify the operands first.
8952     bool UsedAssumedInformation = false;
8953     const auto &SimplifiedLHS =
8954         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8955                                *this, UsedAssumedInformation);
8956     if (!SimplifiedLHS.hasValue())
8957       return ChangeStatus::UNCHANGED;
8958     if (!SimplifiedLHS.getValue())
8959       return indicatePessimisticFixpoint();
8960     LHS = *SimplifiedLHS;
8961 
8962     const auto &SimplifiedRHS =
8963         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8964                                *this, UsedAssumedInformation);
8965     if (!SimplifiedRHS.hasValue())
8966       return ChangeStatus::UNCHANGED;
8967     if (!SimplifiedRHS.getValue())
8968       return indicatePessimisticFixpoint();
8969     RHS = *SimplifiedRHS;
8970 
8971     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8972       return indicatePessimisticFixpoint();
8973 
8974     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8975                                                 DepClassTy::REQUIRED);
8976     if (!LHSAA.isValidState())
8977       return indicatePessimisticFixpoint();
8978 
8979     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8980                                                 DepClassTy::REQUIRED);
8981     if (!RHSAA.isValidState())
8982       return indicatePessimisticFixpoint();
8983 
8984     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8985     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8986     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
8987 
8988     // TODO: make use of undef flag to limit potential values aggressively.
8989     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8990       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
8991         return indicatePessimisticFixpoint();
8992     } else if (LHSAA.undefIsContained()) {
8993       for (const APInt &R : RHSAAPVS) {
8994         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
8995           return indicatePessimisticFixpoint();
8996       }
8997     } else if (RHSAA.undefIsContained()) {
8998       for (const APInt &L : LHSAAPVS) {
8999         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9000           return indicatePessimisticFixpoint();
9001       }
9002     } else {
9003       for (const APInt &L : LHSAAPVS) {
9004         for (const APInt &R : RHSAAPVS) {
9005           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9006             return indicatePessimisticFixpoint();
9007         }
9008       }
9009     }
9010     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9011                                          : ChangeStatus::CHANGED;
9012   }
9013 
9014   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9015     auto AssumedBefore = getAssumed();
9016     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9017       Value *IncomingValue = PHI->getIncomingValue(u);
9018 
9019       // Simplify the operand first.
9020       bool UsedAssumedInformation = false;
9021       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9022           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9023           UsedAssumedInformation);
9024       if (!SimplifiedIncomingValue.hasValue())
9025         continue;
9026       if (!SimplifiedIncomingValue.getValue())
9027         return indicatePessimisticFixpoint();
9028       IncomingValue = *SimplifiedIncomingValue;
9029 
9030       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9031           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9032       if (!PotentialValuesAA.isValidState())
9033         return indicatePessimisticFixpoint();
9034       if (PotentialValuesAA.undefIsContained())
9035         unionAssumedWithUndef();
9036       else
9037         unionAssumed(PotentialValuesAA.getAssumed());
9038     }
9039     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9040                                          : ChangeStatus::CHANGED;
9041   }
9042 
9043   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9044     if (!L.getType()->isIntegerTy())
9045       return indicatePessimisticFixpoint();
9046 
9047     auto Union = [&](Value &V) {
9048       if (isa<UndefValue>(V)) {
9049         unionAssumedWithUndef();
9050         return true;
9051       }
9052       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9053         unionAssumed(CI->getValue());
9054         return true;
9055       }
9056       return false;
9057     };
9058     auto AssumedBefore = getAssumed();
9059 
9060     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9061       return indicatePessimisticFixpoint();
9062 
9063     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9064                                          : ChangeStatus::CHANGED;
9065   }
9066 
9067   /// See AbstractAttribute::updateImpl(...).
9068   ChangeStatus updateImpl(Attributor &A) override {
9069     Value &V = getAssociatedValue();
9070     Instruction *I = dyn_cast<Instruction>(&V);
9071     if (!I) return indicatePessimisticFixpoint();
9072     if (auto *ICI = dyn_cast<ICmpInst>(I))
9073       return updateWithICmpInst(A, ICI);
9074 
9075     if (auto *SI = dyn_cast<SelectInst>(I))
9076       return updateWithSelectInst(A, SI);
9077 
9078     if (auto *CI = dyn_cast<CastInst>(I))
9079       return updateWithCastInst(A, CI);
9080 
9081     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9082       return updateWithBinaryOperator(A, BinOp);
9083 
9084     if (auto *PHI = dyn_cast<PHINode>(I))
9085       return updateWithPHINode(A, PHI);
9086 
9087     if (auto *L = dyn_cast<LoadInst>(I))
9088       return updateWithLoad(A, *L);
9089 
9090     return indicatePessimisticFixpoint();
9091   }
9092 
9093   /// See AbstractAttribute::trackStatistics()
9094   void trackStatistics() const override {
9095     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9096   }
9097 };
9098 
9099 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9100   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9101       : AAPotentialValuesImpl(IRP, A) {}
9102 
9103   /// See AbstractAttribute::updateImpl(...).
9104   ChangeStatus updateImpl(Attributor &A) override {
9105     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9106                      "not be called");
9107   }
9108 
9109   /// See AbstractAttribute::trackStatistics()
9110   void trackStatistics() const override {
9111     STATS_DECLTRACK_FN_ATTR(potential_values)
9112   }
9113 };
9114 
9115 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9116   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9117       : AAPotentialValuesFunction(IRP, A) {}
9118 
9119   /// See AbstractAttribute::trackStatistics()
9120   void trackStatistics() const override {
9121     STATS_DECLTRACK_CS_ATTR(potential_values)
9122   }
9123 };
9124 
9125 struct AAPotentialValuesCallSiteReturned
9126     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9127   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9128       : AACallSiteReturnedFromReturned<AAPotentialValues,
9129                                        AAPotentialValuesImpl>(IRP, A) {}
9130 
9131   /// See AbstractAttribute::trackStatistics()
9132   void trackStatistics() const override {
9133     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9134   }
9135 };
9136 
9137 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9138   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9139       : AAPotentialValuesFloating(IRP, A) {}
9140 
9141   /// See AbstractAttribute::initialize(..).
9142   void initialize(Attributor &A) override {
9143     AAPotentialValuesImpl::initialize(A);
9144     if (isAtFixpoint())
9145       return;
9146 
9147     Value &V = getAssociatedValue();
9148 
9149     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9150       unionAssumed(C->getValue());
9151       indicateOptimisticFixpoint();
9152       return;
9153     }
9154 
9155     if (isa<UndefValue>(&V)) {
9156       unionAssumedWithUndef();
9157       indicateOptimisticFixpoint();
9158       return;
9159     }
9160   }
9161 
9162   /// See AbstractAttribute::updateImpl(...).
9163   ChangeStatus updateImpl(Attributor &A) override {
9164     Value &V = getAssociatedValue();
9165     auto AssumedBefore = getAssumed();
9166     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9167                                              DepClassTy::REQUIRED);
9168     const auto &S = AA.getAssumed();
9169     unionAssumed(S);
9170     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9171                                          : ChangeStatus::CHANGED;
9172   }
9173 
9174   /// See AbstractAttribute::trackStatistics()
9175   void trackStatistics() const override {
9176     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9177   }
9178 };
9179 
9180 /// ------------------------ NoUndef Attribute ---------------------------------
9181 struct AANoUndefImpl : AANoUndef {
9182   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9183 
9184   /// See AbstractAttribute::initialize(...).
9185   void initialize(Attributor &A) override {
9186     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9187       indicateOptimisticFixpoint();
9188       return;
9189     }
9190     Value &V = getAssociatedValue();
9191     if (isa<UndefValue>(V))
9192       indicatePessimisticFixpoint();
9193     else if (isa<FreezeInst>(V))
9194       indicateOptimisticFixpoint();
9195     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9196              isGuaranteedNotToBeUndefOrPoison(&V))
9197       indicateOptimisticFixpoint();
9198     else
9199       AANoUndef::initialize(A);
9200   }
9201 
9202   /// See followUsesInMBEC
9203   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9204                        AANoUndef::StateType &State) {
9205     const Value *UseV = U->get();
9206     const DominatorTree *DT = nullptr;
9207     AssumptionCache *AC = nullptr;
9208     InformationCache &InfoCache = A.getInfoCache();
9209     if (Function *F = getAnchorScope()) {
9210       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9211       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9212     }
9213     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9214     bool TrackUse = false;
9215     // Track use for instructions which must produce undef or poison bits when
9216     // at least one operand contains such bits.
9217     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9218       TrackUse = true;
9219     return TrackUse;
9220   }
9221 
9222   /// See AbstractAttribute::getAsStr().
9223   const std::string getAsStr() const override {
9224     return getAssumed() ? "noundef" : "may-undef-or-poison";
9225   }
9226 
9227   ChangeStatus manifest(Attributor &A) override {
9228     // We don't manifest the noundef attribute for dead positions because the
9229     // associated values with dead positions would be replaced with undef
9230     // values.
9231     bool UsedAssumedInformation = false;
9232     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9233                         UsedAssumedInformation))
9234       return ChangeStatus::UNCHANGED;
9235     // A position whose simplified value does not have any value is
9236     // considered to be dead. We don't manifest noundef in such positions for
9237     // the same reason as above.
9238     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9239              .hasValue())
9240       return ChangeStatus::UNCHANGED;
9241     return AANoUndef::manifest(A);
9242   }
9243 };
9244 
9245 struct AANoUndefFloating : public AANoUndefImpl {
9246   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9247       : AANoUndefImpl(IRP, A) {}
9248 
9249   /// See AbstractAttribute::initialize(...).
9250   void initialize(Attributor &A) override {
9251     AANoUndefImpl::initialize(A);
9252     if (!getState().isAtFixpoint())
9253       if (Instruction *CtxI = getCtxI())
9254         followUsesInMBEC(*this, A, getState(), *CtxI);
9255   }
9256 
9257   /// See AbstractAttribute::updateImpl(...).
9258   ChangeStatus updateImpl(Attributor &A) override {
9259     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9260                             AANoUndef::StateType &T, bool Stripped) -> bool {
9261       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9262                                              DepClassTy::REQUIRED);
9263       if (!Stripped && this == &AA) {
9264         T.indicatePessimisticFixpoint();
9265       } else {
9266         const AANoUndef::StateType &S =
9267             static_cast<const AANoUndef::StateType &>(AA.getState());
9268         T ^= S;
9269       }
9270       return T.isValidState();
9271     };
9272 
9273     StateType T;
9274     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9275                                           VisitValueCB, getCtxI()))
9276       return indicatePessimisticFixpoint();
9277 
9278     return clampStateAndIndicateChange(getState(), T);
9279   }
9280 
9281   /// See AbstractAttribute::trackStatistics()
9282   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9283 };
9284 
9285 struct AANoUndefReturned final
9286     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9287   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9288       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9289 
9290   /// See AbstractAttribute::trackStatistics()
9291   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9292 };
9293 
9294 struct AANoUndefArgument final
9295     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9296   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9297       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9298 
9299   /// See AbstractAttribute::trackStatistics()
9300   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9301 };
9302 
9303 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9304   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9305       : AANoUndefFloating(IRP, A) {}
9306 
9307   /// See AbstractAttribute::trackStatistics()
9308   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9309 };
9310 
9311 struct AANoUndefCallSiteReturned final
9312     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9313   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9314       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9315 
9316   /// See AbstractAttribute::trackStatistics()
9317   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9318 };
9319 
9320 struct AACallEdgesImpl : public AACallEdges {
9321   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9322 
9323   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9324     return CalledFunctions;
9325   }
9326 
9327   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9328 
9329   virtual bool hasNonAsmUnknownCallee() const override {
9330     return HasUnknownCalleeNonAsm;
9331   }
9332 
9333   const std::string getAsStr() const override {
9334     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9335            std::to_string(CalledFunctions.size()) + "]";
9336   }
9337 
9338   void trackStatistics() const override {}
9339 
9340 protected:
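  /// Add \p Fn to the optimistic set of called functions and flag \p Change
  /// as CHANGED if the set actually grew.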
9341   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9342     if (CalledFunctions.insert(Fn)) {
9343       Change = ChangeStatus::CHANGED;
9344       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9345                         << "\n");
9346     }
9347   }
9348 
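  /// Record that some callee of this position is unknown; \p NonAsm is true
  /// unless the unknown callee is inline assembly. Flags \p Change as CHANGED
  /// on any state transition.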
9349   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9350     if (!HasUnknownCallee)
9351       Change = ChangeStatus::CHANGED;
9352     if (NonAsm && !HasUnknownCalleeNonAsm)
9353       Change = ChangeStatus::CHANGED;
9354     HasUnknownCalleeNonAsm |= NonAsm;
9355     HasUnknownCallee = true;
9356   }
9357 
9358 private:
9359   /// Optimistic set of functions that might be called by this position.
9360   SetVector<Function *> CalledFunctions;
9361 
  /// Whether there is any call with an unknown callee.
9363   bool HasUnknownCallee = false;
9364 
  /// Whether there is any call with an unknown callee, excluding inline asm.
9366   bool HasUnknownCalleeNonAsm = false;
9367 };
9368 
9369 struct AACallEdgesCallSite : public AACallEdgesImpl {
9370   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9371       : AACallEdgesImpl(IRP, A) {}
9372   /// See AbstractAttribute::updateImpl(...).
9373   ChangeStatus updateImpl(Attributor &A) override {
9374     ChangeStatus Change = ChangeStatus::UNCHANGED;
9375 
9376     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9377                           bool Stripped) -> bool {
9378       if (Function *Fn = dyn_cast<Function>(&V)) {
9379         addCalledFunction(Fn, Change);
9380       } else {
9381         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9382         setHasUnknownCallee(true, Change);
9383       }
9384 
9385       // Explore all values.
9386       return true;
9387     };
9388 
9389     // Process any value that we might call.
9390     auto ProcessCalledOperand = [&](Value *V) {
9391       bool DummyValue = false;
9392       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9393                                        DummyValue, VisitValue, nullptr,
9394                                        false)) {
9395         // If we haven't gone through all values, assume that there are unknown
9396         // callees.
9397         setHasUnknownCallee(true, Change);
9398       }
9399     };
9400 
    CallBase *CB = cast<CallBase>(getCtxI());
9402 
9403     if (CB->isInlineAsm()) {
9404       setHasUnknownCallee(false, Change);
9405       return Change;
9406     }
9407 
9408     // Process callee metadata if available.
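    // For instance, an indirect call site annotated as
    //   call void %fptr(), !callees !0
    //   !0 = !{void ()* @f, void ()* @g}
    // lets us enumerate {@f, @g} as the complete set of possible callees.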
9409     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9410       for (auto &Op : MD->operands()) {
9411         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9412         if (Callee)
9413           addCalledFunction(Callee, Change);
9414       }
9415       return Change;
9416     }
9417 
    // The simplest case: the called operand itself.
9419     ProcessCalledOperand(CB->getCalledOperand());
9420 
9421     // Process callback functions.
9422     SmallVector<const Use *, 4u> CallbackUses;
9423     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9424     for (const Use *U : CallbackUses)
9425       ProcessCalledOperand(U->get());
9426 
9427     return Change;
9428   }
9429 };
9430 
9431 struct AACallEdgesFunction : public AACallEdgesImpl {
9432   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9433       : AACallEdgesImpl(IRP, A) {}
9434 
9435   /// See AbstractAttribute::updateImpl(...).
9436   ChangeStatus updateImpl(Attributor &A) override {
9437     ChangeStatus Change = ChangeStatus::UNCHANGED;
9438 
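    // Collect the optimistic edges of every call-site position and merge
    // them into this function-level state.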
9439     auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);
9441 
9442       auto &CBEdges = A.getAAFor<AACallEdges>(
9443           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9444       if (CBEdges.hasNonAsmUnknownCallee())
9445         setHasUnknownCallee(true, Change);
9446       if (CBEdges.hasUnknownCallee())
9447         setHasUnknownCallee(false, Change);
9448 
9449       for (Function *F : CBEdges.getOptimisticEdges())
9450         addCalledFunction(F, Change);
9451 
9452       return true;
9453     };
9454 
9455     // Visit all callable instructions.
9456     bool UsedAssumedInformation = false;
9457     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9458                                            UsedAssumedInformation)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
9461       setHasUnknownCallee(true, Change);
9462     }
9463 
9464     return Change;
9465   }
9466 };
9467 
9468 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9469 private:
9470   struct QuerySet {
9471     void markReachable(Function *Fn) {
9472       Reachable.insert(Fn);
9473       Unreachable.erase(Fn);
9474     }
9475 
9476     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9477                         ArrayRef<const AACallEdges *> AAEdgesList) {
9478       ChangeStatus Change = ChangeStatus::UNCHANGED;
9479 
9480       for (auto *AAEdges : AAEdgesList) {
9481         if (AAEdges->hasUnknownCallee()) {
9482           if (!CanReachUnknownCallee)
9483             Change = ChangeStatus::CHANGED;
9484           CanReachUnknownCallee = true;
9485           return Change;
9486         }
9487       }
9488 
9489       for (Function *Fn : make_early_inc_range(Unreachable)) {
9490         if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
9491           Change = ChangeStatus::CHANGED;
9492           markReachable(Fn);
9493         }
9494       }
9495       return Change;
9496     }
9497 
9498     bool isReachable(Attributor &A, const AAFunctionReachability &AA,
9499                      ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
9500       // Assume that we can reach the function.
9501       // TODO: Be more specific with the unknown callee.
9502       if (CanReachUnknownCallee)
9503         return true;
9504 
9505       if (Reachable.count(Fn))
9506         return true;
9507 
9508       if (Unreachable.count(Fn))
9509         return false;
9510 
9511       // We need to assume that this function can't reach Fn to prevent
9512       // an infinite loop if this function is recursive.
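      // E.g., if this function participates in a recursive cycle, a nested
      // query for the same Fn will now return false instead of looping.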
9513       Unreachable.insert(Fn);
9514 
9515       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9516       if (Result)
9517         markReachable(Fn);
9518       return Result;
9519     }
9520 
9521     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9522                           ArrayRef<const AACallEdges *> AAEdgesList,
9523                           Function *Fn) const {
9524 
      // Handle the trivial case first: Fn is a direct optimistic edge.
9526       for (auto *AAEdges : AAEdgesList) {
9527         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9528 
9529         if (Edges.count(Fn))
9530           return true;
9531       }
9532 
9533       SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto *AAEdges : AAEdgesList) {
9535         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9536 
9537         for (Function *Edge : Edges) {
9538           // We don't need a dependency if the result is reachable.
9539           const AAFunctionReachability &EdgeReachability =
9540               A.getAAFor<AAFunctionReachability>(
9541                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9542           Deps.push_back(&EdgeReachability);
9543 
9544           if (EdgeReachability.canReach(A, Fn))
9545             return true;
9546         }
9547       }
9548 
9549       // The result is false for now, set dependencies and leave.
      for (const auto *Dep : Deps)
9551         A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);
9552 
9553       return false;
9554     }
9555 
    /// Set of functions that we know for sure are reachable.
9557     DenseSet<Function *> Reachable;
9558 
9559     /// Set of functions that are unreachable, but might become reachable.
9560     DenseSet<Function *> Unreachable;
9561 
    /// If we can reach a call with an unknown callee, we assume that we can
    /// reach any function.
9564     bool CanReachUnknownCallee = false;
9565   };
9566 
9567 public:
9568   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9569       : AAFunctionReachability(IRP, A) {}
9570 
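  /// Can the whole function reach \p Fn?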
9571   bool canReach(Attributor &A, Function *Fn) const override {
9572     const AACallEdges &AAEdges =
9573         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9574 
    // The Attributor hands out attributes as const, so this function has to
    // be const for users of this attribute to call it without a const_cast.
    // The const_cast below is a hack that lets us cache query results.
9579     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9580     bool Result =
9581         NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);
9582 
9583     return Result;
9584   }
9585 
  /// Can \p CB reach \p Fn?
9587   bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
9588     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9589         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9590 
    // The Attributor hands out attributes as const, so this function has to
    // be const for users of this attribute to call it without a const_cast.
    // The const_cast below is a hack that lets us cache query results.
9595     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9596     QuerySet &CBQuery = NonConstThis->CBQueries[&CB];
9597 
9598     bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);
9599 
9600     return Result;
9601   }
9602 
9603   /// See AbstractAttribute::updateImpl(...).
9604   ChangeStatus updateImpl(Attributor &A) override {
9605     const AACallEdges &AAEdges =
9606         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9607     ChangeStatus Change = ChangeStatus::UNCHANGED;
9608 
9609     Change |= WholeFunction.update(A, *this, {&AAEdges});
9610 
9611     for (auto CBPair : CBQueries) {
9612       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9613           *this, IRPosition::callsite_function(*CBPair.first),
9614           DepClassTy::REQUIRED);
9615 
9616       Change |= CBPair.second.update(A, *this, {&AAEdges});
9617     }
9618 
9619     return Change;
9620   }
9621 
9622   const std::string getAsStr() const override {
9623     size_t QueryCount =
9624         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9625 
9626     return "FunctionReachability [" +
9627            std::to_string(WholeFunction.Reachable.size()) + "," +
9628            std::to_string(QueryCount) + "]";
9629   }
9630 
9631   void trackStatistics() const override {}
9632 
9633 private:
9634   bool canReachUnknownCallee() const override {
9635     return WholeFunction.CanReachUnknownCallee;
9636   }
9637 
  /// Used to answer if the whole function can reach a specific function.
9639   QuerySet WholeFunction;
9640 
9641   /// Used to answer if a call base inside this function can reach a specific
9642   /// function.
9643   DenseMap<CallBase *, QuerySet> CBQueries;
9644 };
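
// Example (hypothetical usage): a client AA can query reachability via
//   const auto &FnReach = A.getAAFor<AAFunctionReachability>(
//       *this, IRPosition::function(F), DepClassTy::OPTIONAL);
//   if (FnReach.canReach(A, &TargetFn)) { ... }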
9645 
9646 /// ---------------------- Assumption Propagation ------------------------------
9647 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
9648   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
9649                        const DenseSet<StringRef> &Known)
9650       : AAAssumptionInfo(IRP, A, Known) {}
9651 
9652   bool hasAssumption(const StringRef Assumption) const override {
9653     return isValidState() && setContains(Assumption);
9654   }
9655 
9656   /// See AbstractAttribute::getAsStr()
9657   const std::string getAsStr() const override {
9658     const SetContents &Known = getKnown();
9659     const SetContents &Assumed = getAssumed();
9660 
9661     const std::string KnownStr =
9662         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
9663     const std::string AssumedStr =
9664         (Assumed.isUniversal())
9665             ? "Universal"
9666             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
9667 
9668     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
9669   }
9670 };
9671 
9672 /// Propagates assumption information from parent functions to all of their
9673 /// successors. An assumption can be propagated if the containing function
9674 /// dominates the called function.
9675 ///
9676 /// We start with a "known" set of assumptions already valid for the associated
9677 /// function and an "assumed" set that initially contains all possible
9678 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
9679 /// contents as concrete values are known. The concrete values are seeded by the
/// first nodes that are either entries into the call graph or contain no
9681 /// assumptions. Each node is updated as the intersection of the assumed state
9682 /// with all of its predecessors.
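/// For example, if a function's callers carry assumption sets {A,B} and
/// {B,C}, its assumed set narrows to the intersection {B}.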
9683 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
9684   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
9685       : AAAssumptionInfoImpl(IRP, A,
9686                              getAssumptions(*IRP.getAssociatedFunction())) {}
9687 
9688   /// See AbstractAttribute::manifest(...).
9689   ChangeStatus manifest(Attributor &A) override {
9690     const auto &Assumptions = getKnown();
9691 
9692     // Don't manifest a universal set if it somehow made it here.
9693     if (Assumptions.isUniversal())
9694       return ChangeStatus::UNCHANGED;
9695 
9696     Function *AssociatedFunction = getAssociatedFunction();
9697 
9698     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
9699 
9700     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9701   }
9702 
9703   /// See AbstractAttribute::updateImpl(...).
9704   ChangeStatus updateImpl(Attributor &A) override {
9705     bool Changed = false;
9706 
9707     auto CallSitePred = [&](AbstractCallSite ACS) {
9708       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
9709           *this, IRPosition::callsite_function(*ACS.getInstruction()),
9710           DepClassTy::REQUIRED);
9711       // Get the set of assumptions shared by all of this function's callers.
9712       Changed |= getIntersection(AssumptionAA.getAssumed());
9713       return !getAssumed().empty() || !getKnown().empty();
9714     };
9715 
9716     bool AllCallSitesKnown;
9717     // Get the intersection of all assumptions held by this node's predecessors.
9718     // If we don't know all the call sites then this is either an entry into the
9719     // call graph or an empty node. This node is known to only contain its own
9720     // assumptions and can be propagated to its successors.
9721     if (!A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown))
9722       return indicatePessimisticFixpoint();
9723 
9724     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9725   }
9726 
9727   void trackStatistics() const override {}
9728 };
9729 
9730 /// Assumption Info defined for call sites.
9731 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
9732 
9733   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
9734       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
9735 
9736   /// See AbstractAttribute::initialize(...).
9737   void initialize(Attributor &A) override {
9738     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
9739     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
9740   }
9741 
9742   /// See AbstractAttribute::manifest(...).
9743   ChangeStatus manifest(Attributor &A) override {
9744     // Don't manifest a universal set if it somehow made it here.
9745     if (getKnown().isUniversal())
9746       return ChangeStatus::UNCHANGED;
9747 
9748     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
9749     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
9750 
9751     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9752   }
9753 
9754   /// See AbstractAttribute::updateImpl(...).
9755   ChangeStatus updateImpl(Attributor &A) override {
9756     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
9757     auto &AssumptionAA =
9758         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
9759     bool Changed = getIntersection(AssumptionAA.getAssumed());
9760     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9761   }
9762 
9763   /// See AbstractAttribute::trackStatistics()
9764   void trackStatistics() const override {}
9765 
9766 private:
  /// Helper to initialize the known set with all the assumptions this call
  /// and the callee contain.
9769   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
9770     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
9771     auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
9776     return Assumptions;
9777   }
9778 };
9779 
9780 } // namespace
9781 
9782 AACallGraphNode *AACallEdgeIterator::operator*() const {
9783   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9784       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9785 }
9786 
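// Emit the optimistic call graph in DOT format on stdout; the output can be
// rendered with Graphviz (e.g., `dot -Tsvg`).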
9787 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9788 
9789 const char AAReturnedValues::ID = 0;
9790 const char AANoUnwind::ID = 0;
9791 const char AANoSync::ID = 0;
9792 const char AANoFree::ID = 0;
9793 const char AANonNull::ID = 0;
9794 const char AANoRecurse::ID = 0;
9795 const char AAWillReturn::ID = 0;
9796 const char AAUndefinedBehavior::ID = 0;
9797 const char AANoAlias::ID = 0;
9798 const char AAReachability::ID = 0;
9799 const char AANoReturn::ID = 0;
9800 const char AAIsDead::ID = 0;
9801 const char AADereferenceable::ID = 0;
9802 const char AAAlign::ID = 0;
9803 const char AANoCapture::ID = 0;
9804 const char AAValueSimplify::ID = 0;
9805 const char AAHeapToStack::ID = 0;
9806 const char AAPrivatizablePtr::ID = 0;
9807 const char AAMemoryBehavior::ID = 0;
9808 const char AAMemoryLocation::ID = 0;
9809 const char AAValueConstantRange::ID = 0;
9810 const char AAPotentialValues::ID = 0;
9811 const char AANoUndef::ID = 0;
9812 const char AACallEdges::ID = 0;
9813 const char AAFunctionReachability::ID = 0;
9814 const char AAPointerInfo::ID = 0;
9815 const char AAAssumptionInfo::ID = 0;
9816 
9817 // Macro magic to create the static generator function for attributes that
9818 // follow the naming scheme.
9819 
9820 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9821   case IRPosition::PK:                                                         \
9822     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9823 
9824 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9825   case IRPosition::PK:                                                         \
9826     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9827     ++NumAAs;                                                                  \
9828     break;
9829 
9830 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9831   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9832     CLASS *AA = nullptr;                                                       \
9833     switch (IRP.getPositionKind()) {                                           \
9834       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9835       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9836       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9837       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9838       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9839       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9840       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9841       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9842     }                                                                          \
9843     return *AA;                                                                \
9844   }
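
// As an illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// expands to AANoUnwind::createForPosition(...), which instantiates either an
// AANoUnwindFunction or an AANoUnwindCallSite and hits llvm_unreachable for
// any other position kind.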
9845 
9846 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9847   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9848     CLASS *AA = nullptr;                                                       \
9849     switch (IRP.getPositionKind()) {                                           \
9850       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9851       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9852       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9853       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9854       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9855       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9856       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9857       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9858     }                                                                          \
9859     return *AA;                                                                \
9860   }
9861 
9862 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9863   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9864     CLASS *AA = nullptr;                                                       \
9865     switch (IRP.getPositionKind()) {                                           \
9866       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9867       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9868       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9869       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9870       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9871       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9872       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9873       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9874     }                                                                          \
9875     return *AA;                                                                \
9876   }
9877 
9878 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9879   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9880     CLASS *AA = nullptr;                                                       \
9881     switch (IRP.getPositionKind()) {                                           \
9882       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9883       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9884       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9885       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9886       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9887       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9888       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9889       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9890     }                                                                          \
9891     return *AA;                                                                \
9892   }
9893 
9894 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9895   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9896     CLASS *AA = nullptr;                                                       \
9897     switch (IRP.getPositionKind()) {                                           \
9898       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9899       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9900       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9901       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9902       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9903       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9904       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9905       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9906     }                                                                          \
9907     return *AA;                                                                \
9908   }
9909 
9910 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9911 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9912 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9913 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9914 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9915 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9916 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9917 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9918 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
9919 
9920 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9921 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9922 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9923 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9924 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9925 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9926 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9927 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9928 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9929 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9930 
9931 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9932 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9933 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9934 
9935 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9936 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9937 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9938 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9939 
9940 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9941 
9942 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9943 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9944 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9945 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9946 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9947 #undef SWITCH_PK_CREATE
9948 #undef SWITCH_PK_INV
9949