//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

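// As an illustrative sketch of the macros above, STATS_DECLTRACK_ARG_ATTR(NAME)
// with NAME = returned expands roughly to:
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }
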
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)

#undef PIPE_OPERATOR
} // namespace llvm
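
// With the overloads above in place, a statement such as
//   LLVM_DEBUG(dbgs() << NoUnwindAA << "\n");
// resolves unambiguously to the AbstractAttribute printer for each listed
// subclass (an illustrative note; NoUnwindAA names a hypothetical variable).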

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
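///
/// As an illustrative sketch (hypothetical values, not from a real call
/// site): for PtrElemTy = { i32, { i16, i16 } }, Offset = 6 and ResTy = i16*,
/// the loop below finds the struct path [0, 1, 1] and emits
///   %p.0.1.1 = getelementptr { i32, { i16, i16 } },
///              { i32, { i16, i16 } }* %p, i32 0, i32 1, i32 1
/// while an offset that lands between fields falls back to the i8* bitcast
/// plus byte-wise getelementptr path.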
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    SmallVector<Value *, 4> Indices;
    std::string GEPName = Ptr->getName().str() + ".0";

    // Add 0 index to look through the pointer.
    assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
           "Offset out of bounds");
    Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));

    Type *Ty = PtrElemTy;
    do {
      auto *STy = dyn_cast<StructType>(Ty);
      if (!STy)
        // Non-aggregate type, we cast and make byte-wise progress now.
        break;

      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;

      uint64_t Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      uint64_t Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);

      LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                        << " Idx: " << Idx << " Rem: " << Rem << "\n");

      GEPName += "." + std::to_string(Idx);
      Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
      Offset = Rem;
    } while (Offset);

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (Offset) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
                          GEPName + ".b" + Twine(Offset));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
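///
/// For example (illustrative IR only): starting the traversal at %sel in
///   %sel = select i1 %c, i32* %a, i32* %b
/// pushes %a and %b onto the worklist, so \p VisitValueCB is eventually
/// invoked on them (with the "stripped" flag set) rather than on the select
/// itself.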
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA,
        IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
        DepClassTy::NONE);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do not
    // have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (i.e., an update
/// is required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
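
// Typical usage of the helper above (a sketch mirroring, e.g.,
// AAReturnedFromReturnedValues::updateImpl below):
//   StateType S(StateType::getBestState(this->getState()));
//   /* ... accumulate information into S ... */
//   return clampStateAndIndicateChange(this->getState(), S);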

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values, and we
  // want to join (IntegerState::operator&) the states of all those that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments, and
  // we want to join (IntegerState::operator&) the states of all those that
  // exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call base context to argument. "
                    << "Position: " << Pos << ", CB Arg state: "
                    << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a) {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    } else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
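
// Deduction sketch (illustrative; @f and @g are hypothetical): for
//   define void @f() {
//     call void @g()
//     ret void
//   }
// the call is the only instruction of an opcode listed above, so @f stays
// "nounwind" exactly as long as the AANoUnwind attribute at the call site of
// @g is assumed to hold; otherwise CheckForNoUnwind fails and @f falls back
// to the pessimistic "may-unwind" state.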

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
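///
/// For example (an illustrative sketch): in
///   define i32* @id(i32* %p) { ret i32* %p }
/// the unique returned value is the argument %p, so manifest would annotate
///   define i32* @id(i32* returned %p)
/// which callers and later deductions can exploit.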
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments; if one is marked as returned, we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return nullptr. If it is not clear yet, return
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifying the ReturnedValues map while we
  // iterate over it, we keep a record of potential new entries in a separate
  // map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV,
                               SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
                      << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()),
        DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current
    // scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      }
      if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything here.
        continue;
      }
      if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1244 
1245 /// Returned values information for a call sites.
1246 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
AAReturnedValuesCallSite__anon811b40a70111::AAReturnedValuesCallSite1247   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1248       : AAReturnedValuesImpl(IRP, A) {}
1249 
1250   /// See AbstractAttribute::initialize(...).
initialize__anon811b40a70111::AAReturnedValuesCallSite1251   void initialize(Attributor &A) override {
1252     // TODO: Once we have call site specific value information we can provide
1253     //       call site specific liveness information and then it makes
1254     //       sense to specialize attributes for call sites instead of
1255     //       redirecting requests to the callee.
1256     llvm_unreachable("Abstract attributes for returned values are not "
1257                      "supported for call sites yet!");
1258   }
1259 
1260   /// See AbstractAttribute::updateImpl(...).
1261   ChangeStatus updateImpl(Attributor &A) override {
1262     return indicatePessimisticFixpoint();
1263   }
1264 
1265   /// See AbstractAttribute::trackStatistics()
1266   void trackStatistics() const override {}
1267 };
1268 
1269 /// ------------------------ NoSync Function Attribute -------------------------
1270 
1271 struct AANoSyncImpl : AANoSync {
1272   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1273 
1274   const std::string getAsStr() const override {
1275     return getAssumed() ? "nosync" : "may-sync";
1276   }
1277 
1278   /// See AbstractAttribute::updateImpl(...).
1279   ChangeStatus updateImpl(Attributor &A) override;
1280 
1281   /// Helper function used to determine whether an instruction is non-relaxed
1282   /// atomic, i.e., an atomic instruction that does not have unordered or
1283   /// monotonic ordering.
1284   static bool isNonRelaxedAtomic(Instruction *I);
1285 
1286   /// Helper function specific to intrinsics which are potentially volatile.
1287   static bool isNoSyncIntrinsic(Instruction *I);
1288 };
1289 
1290 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1291   if (!I->isAtomic())
1292     return false;
1293 
1294   if (auto *FI = dyn_cast<FenceInst>(I))
1295     // All legal orderings for fence are stronger than monotonic.
1296     return FI->getSyncScopeID() != SyncScope::SingleThread;
1297   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1298     // Unordered is not a legal ordering for cmpxchg.
1299     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1300             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1301   }
1302 
1303   AtomicOrdering Ordering;
1304   switch (I->getOpcode()) {
1305   case Instruction::AtomicRMW:
1306     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1307     break;
1308   case Instruction::Store:
1309     Ordering = cast<StoreInst>(I)->getOrdering();
1310     break;
1311   case Instruction::Load:
1312     Ordering = cast<LoadInst>(I)->getOrdering();
1313     break;
1314   default:
1315     llvm_unreachable(
1316         "New atomic operations need to be known in the attributor.");
1317   }
1318 
1319   return (Ordering != AtomicOrdering::Unordered &&
1320           Ordering != AtomicOrdering::Monotonic);
1321 }
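// Illustrative IR (not taken from the original source): a relaxed atomic load
// does not inhibit nosync deduction, while any stronger ordering does:
//
//   %a = load atomic i32, i32* %p monotonic, align 4 ; relaxed -> ok
//   %b = load atomic i32, i32* %p acquire, align 4   ; non-relaxed -> may sync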
1322 
1323 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1324 /// which would be nosync except that they have a volatile flag.  All other
1325 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1326 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1327   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1328     return !MI->isVolatile();
1329   return false;
1330 }
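// Illustrative IR: only the volatile flag distinguishes the two calls below,
// and only the first one is treated as nosync by this helper:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)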
1331 
1332 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1333 
1334   auto CheckRWInstForNoSync = [&](Instruction &I) {
1335     /// We are looking for volatile instructions or non-relaxed atomics.
1336 
1337     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1338       if (CB->hasFnAttr(Attribute::NoSync))
1339         return true;
1340 
1341       if (isNoSyncIntrinsic(&I))
1342         return true;
1343 
1344       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1345           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1346       return NoSyncAA.isAssumedNoSync();
1347     }
1348 
1349     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1350       return true;
1351 
1352     return false;
1353   };
1354 
1355   auto CheckForNoSync = [&](Instruction &I) {
1356     // At this point we handled all read/write effects and they are all
1357     // nosync, so they can be skipped.
1358     if (I.mayReadOrWriteMemory())
1359       return true;
1360 
1361     // non-convergent and readnone imply nosync.
1362     return !cast<CallBase>(I).isConvergent();
1363   };
1364 
1365   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1366       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1367     return indicatePessimisticFixpoint();
1368 
1369   return ChangeStatus::UNCHANGED;
1370 }
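// Illustrative example of the overall deduction: a function whose body
// contains only non-volatile, non-atomic accesses and calls to nosync callees
// is itself deduced nosync, e.g.:
//
//   define i32 @load_only(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }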
1371 
1372 struct AANoSyncFunction final : public AANoSyncImpl {
1373   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1374       : AANoSyncImpl(IRP, A) {}
1375 
1376   /// See AbstractAttribute::trackStatistics()
1377   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1378 };
1379 
1380 /// NoSync attribute deduction for a call site.
1381 struct AANoSyncCallSite final : AANoSyncImpl {
1382   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1383       : AANoSyncImpl(IRP, A) {}
1384 
1385   /// See AbstractAttribute::initialize(...).
1386   void initialize(Attributor &A) override {
1387     AANoSyncImpl::initialize(A);
1388     Function *F = getAssociatedFunction();
1389     if (!F || F->isDeclaration())
1390       indicatePessimisticFixpoint();
1391   }
1392 
1393   /// See AbstractAttribute::updateImpl(...).
1394   ChangeStatus updateImpl(Attributor &A) override {
1395     // TODO: Once we have call site specific value information we can provide
1396     //       call site specific liveness information and then it makes
1397     //       sense to specialize attributes for call sites arguments instead of
1398     //       redirecting requests to the callee argument.
1399     Function *F = getAssociatedFunction();
1400     const IRPosition &FnPos = IRPosition::function(*F);
1401     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1402     return clampStateAndIndicateChange(getState(), FnAA.getState());
1403   }
1404 
1405   /// See AbstractAttribute::trackStatistics()
1406   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1407 };
1408 
1409 /// ------------------------ No-Free Attributes ----------------------------
1410 
1411 struct AANoFreeImpl : public AANoFree {
1412   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1413 
1414   /// See AbstractAttribute::updateImpl(...).
1415   ChangeStatus updateImpl(Attributor &A) override {
1416     auto CheckForNoFree = [&](Instruction &I) {
1417       const auto &CB = cast<CallBase>(I);
1418       if (CB.hasFnAttr(Attribute::NoFree))
1419         return true;
1420 
1421       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1422           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1423       return NoFreeAA.isAssumedNoFree();
1424     };
1425 
1426     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1427       return indicatePessimisticFixpoint();
1428     return ChangeStatus::UNCHANGED;
1429   }
1430 
1431   /// See AbstractAttribute::getAsStr().
1432   const std::string getAsStr() const override {
1433     return getAssumed() ? "nofree" : "may-free";
1434   }
1435 };
1436 
1437 struct AANoFreeFunction final : public AANoFreeImpl {
1438   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1439       : AANoFreeImpl(IRP, A) {}
1440 
1441   /// See AbstractAttribute::trackStatistics()
1442   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1443 };
1444 
1445 /// NoFree attribute deduction for a call site.
1446 struct AANoFreeCallSite final : AANoFreeImpl {
1447   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1448       : AANoFreeImpl(IRP, A) {}
1449 
1450   /// See AbstractAttribute::initialize(...).
1451   void initialize(Attributor &A) override {
1452     AANoFreeImpl::initialize(A);
1453     Function *F = getAssociatedFunction();
1454     if (!F || F->isDeclaration())
1455       indicatePessimisticFixpoint();
1456   }
1457 
1458   /// See AbstractAttribute::updateImpl(...).
1459   ChangeStatus updateImpl(Attributor &A) override {
1460     // TODO: Once we have call site specific value information we can provide
1461     //       call site specific liveness information and then it makes
1462     //       sense to specialize attributes for call sites arguments instead of
1463     //       redirecting requests to the callee argument.
1464     Function *F = getAssociatedFunction();
1465     const IRPosition &FnPos = IRPosition::function(*F);
1466     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1467     return clampStateAndIndicateChange(getState(), FnAA.getState());
1468   }
1469 
1470   /// See AbstractAttribute::trackStatistics()
1471   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1472 };
1473 
1474 /// NoFree attribute for floating values.
1475 struct AANoFreeFloating : AANoFreeImpl {
1476   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1477       : AANoFreeImpl(IRP, A) {}
1478 
1479   /// See AbstractAttribute::trackStatistics()
1480   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
1481 
1482   /// See AbstractAttribute::updateImpl(...).
1483   ChangeStatus updateImpl(Attributor &A) override {
1484     const IRPosition &IRP = getIRPosition();
1485 
1486     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1487         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1488     if (NoFreeAA.isAssumedNoFree())
1489       return ChangeStatus::UNCHANGED;
1490 
1491     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1492     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1493       Instruction *UserI = cast<Instruction>(U.getUser());
1494       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1495         if (CB->isBundleOperand(&U))
1496           return false;
1497         if (!CB->isArgOperand(&U))
1498           return true;
1499         unsigned ArgNo = CB->getArgOperandNo(&U);
1500 
1501         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1502             *this, IRPosition::callsite_argument(*CB, ArgNo),
1503             DepClassTy::REQUIRED);
1504         return NoFreeArg.isAssumedNoFree();
1505       }
1506 
1507       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1508           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1509         Follow = true;
1510         return true;
1511       }
1512       if (isa<ReturnInst>(UserI))
1513         return true;
1514 
1515       // Unknown user.
1516       return false;
1517     };
1518     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1519       return indicatePessimisticFixpoint();
1520 
1521     return ChangeStatus::UNCHANGED;
1522   }
1523 };
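// Illustrative use-walk (@h is a made-up callee): nofree is propagated
// through pointer "pass-through" instructions and call site arguments:
//
//   %q = getelementptr i8, i8* %p, i64 4 ; transparent, uses of %q are followed
//   call void @h(i8* %q)                 ; queries AANoFree of @h's argument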
1524 
1525 /// NoFree attribute for a function argument.
1526 struct AANoFreeArgument final : AANoFreeFloating {
1527   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1528       : AANoFreeFloating(IRP, A) {}
1529 
1530   /// See AbstractAttribute::trackStatistics()
1531   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1532 };
1533 
1534 /// NoFree attribute for a call site argument.
1535 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1536   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1537       : AANoFreeFloating(IRP, A) {}
1538 
1539   /// See AbstractAttribute::updateImpl(...).
1540   ChangeStatus updateImpl(Attributor &A) override {
1541     // TODO: Once we have call site specific value information we can provide
1542     //       call site specific liveness information and then it makes
1543     //       sense to specialize attributes for call sites arguments instead of
1544     //       redirecting requests to the callee argument.
1545     Argument *Arg = getAssociatedArgument();
1546     if (!Arg)
1547       return indicatePessimisticFixpoint();
1548     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1549     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1550     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1551   }
1552 
1553   /// See AbstractAttribute::trackStatistics()
1554   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
1555 };
1556 
1557 /// NoFree attribute for function return value.
1558 struct AANoFreeReturned final : AANoFreeFloating {
1559   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1560       : AANoFreeFloating(IRP, A) {
1561     llvm_unreachable("NoFree is not applicable to function returns!");
1562   }
1563 
1564   /// See AbstractAttribute::initialize(...).
1565   void initialize(Attributor &A) override {
1566     llvm_unreachable("NoFree is not applicable to function returns!");
1567   }
1568 
1569   /// See AbstractAttribute::updateImpl(...).
1570   ChangeStatus updateImpl(Attributor &A) override {
1571     llvm_unreachable("NoFree is not applicable to function returns!");
1572   }
1573 
1574   /// See AbstractAttribute::trackStatistics()
1575   void trackStatistics() const override {}
1576 };
1577 
1578 /// NoFree attribute deduction for a call site return value.
1579 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1580   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1581       : AANoFreeFloating(IRP, A) {}
1582 
1583   ChangeStatus manifest(Attributor &A) override {
1584     return ChangeStatus::UNCHANGED;
1585   }
1586   /// See AbstractAttribute::trackStatistics()
1587   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1588 };
1589 
1590 /// ------------------------ NonNull Argument Attribute ------------------------
1591 static int64_t getKnownNonNullAndDerefBytesForUse(
1592     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1593     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1594   TrackUse = false;
1595 
1596   const Value *UseV = U->get();
1597   if (!UseV->getType()->isPointerTy())
1598     return 0;
1599 
1600   // We need to follow common pointer manipulation uses to the accesses they
1601   // feed into. We try to be smart and avoid looking through things we do not
1602   // want to handle for now, e.g., non-inbounds GEPs.
1603   if (isa<CastInst>(I)) {
1604     TrackUse = true;
1605     return 0;
1606   }
1607 
1608   if (isa<GetElementPtrInst>(I)) {
1609     TrackUse = true;
1610     return 0;
1611   }
1612 
1613   Type *PtrTy = UseV->getType();
1614   const Function *F = I->getFunction();
1615   bool NullPointerIsDefined =
1616       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1617   const DataLayout &DL = A.getInfoCache().getDL();
1618   if (const auto *CB = dyn_cast<CallBase>(I)) {
1619     if (CB->isBundleOperand(U)) {
1620       if (RetainedKnowledge RK = getKnowledgeFromUse(
1621               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1622         IsNonNull |=
1623             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1624         return RK.ArgValue;
1625       }
1626       return 0;
1627     }
1628 
1629     if (CB->isCallee(U)) {
1630       IsNonNull |= !NullPointerIsDefined;
1631       return 0;
1632     }
1633 
1634     unsigned ArgNo = CB->getArgOperandNo(U);
1635     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1636     // As long as we only use known information there is no need to track
1637     // dependences here.
1638     auto &DerefAA =
1639         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1640     IsNonNull |= DerefAA.isKnownNonNull();
1641     return DerefAA.getKnownDereferenceableBytes();
1642   }
1643 
1644   int64_t Offset;
1645   const Value *Base =
1646       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1647   if (Base) {
1648     if (Base == &AssociatedValue &&
1649         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1650       int64_t DerefBytes =
1651           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1652 
1653       IsNonNull |= !NullPointerIsDefined;
1654       return std::max(int64_t(0), DerefBytes);
1655     }
1656   }
1657 
1658   /// Corner case when the offset is 0.
1659   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1660                                               /*AllowNonInbounds*/ true);
1661   if (Base) {
1662     if (Offset == 0 && Base == &AssociatedValue &&
1663         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1664       int64_t DerefBytes =
1665           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1666       IsNonNull |= !NullPointerIsDefined;
1667       return std::max(int64_t(0), DerefBytes);
1668     }
1669   }
1670 
1671   return 0;
1672 }
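// Illustrative IR: in an address space where null is not defined, a plain
// load lets the helper above report the accessed pointer as nonnull and
// dereferenceable for the store size of the accessed type:
//
//   %v = load i64, i64* %p   ; %p: nonnull, dereferenceable(8)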
1673 
1674 struct AANonNullImpl : AANonNull {
1675   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1676       : AANonNull(IRP, A),
1677         NullIsDefined(NullPointerIsDefined(
1678             getAnchorScope(),
1679             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1680 
1681   /// See AbstractAttribute::initialize(...).
1682   void initialize(Attributor &A) override {
1683     Value &V = getAssociatedValue();
1684     if (!NullIsDefined &&
1685         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1686                 /* IgnoreSubsumingPositions */ false, &A)) {
1687       indicateOptimisticFixpoint();
1688       return;
1689     }
1690 
1691     if (isa<ConstantPointerNull>(V)) {
1692       indicatePessimisticFixpoint();
1693       return;
1694     }
1695 
1696     AANonNull::initialize(A);
1697 
1698     bool CanBeNull, CanBeFreed;
1699     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1700                                          CanBeFreed)) {
1701       if (!CanBeNull) {
1702         indicateOptimisticFixpoint();
1703         return;
1704       }
1705     }
1706 
1707     if (isa<GlobalValue>(&getAssociatedValue())) {
1708       indicatePessimisticFixpoint();
1709       return;
1710     }
1711 
1712     if (Instruction *CtxI = getCtxI())
1713       followUsesInMBEC(*this, A, getState(), *CtxI);
1714   }
1715 
1716   /// See followUsesInMBEC
1717   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1718                        AANonNull::StateType &State) {
1719     bool IsNonNull = false;
1720     bool TrackUse = false;
1721     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1722                                        IsNonNull, TrackUse);
1723     State.setKnown(IsNonNull);
1724     return TrackUse;
1725   }
1726 
1727   /// See AbstractAttribute::getAsStr().
1728   const std::string getAsStr() const override {
1729     return getAssumed() ? "nonnull" : "may-null";
1730   }
1731 
1732   /// Flag to determine if the underlying value can be null and still allow
1733   /// valid accesses.
1734   const bool NullIsDefined;
1735 };
1736 
1737 /// NonNull attribute for a floating value.
1738 struct AANonNullFloating : public AANonNullImpl {
1739   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1740       : AANonNullImpl(IRP, A) {}
1741 
1742   /// See AbstractAttribute::updateImpl(...).
1743   ChangeStatus updateImpl(Attributor &A) override {
1744     const DataLayout &DL = A.getDataLayout();
1745 
1746     DominatorTree *DT = nullptr;
1747     AssumptionCache *AC = nullptr;
1748     InformationCache &InfoCache = A.getInfoCache();
1749     if (const Function *Fn = getAnchorScope()) {
1750       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1751       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1752     }
1753 
1754     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1755                             AANonNull::StateType &T, bool Stripped) -> bool {
1756       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1757                                              DepClassTy::REQUIRED);
1758       if (!Stripped && this == &AA) {
1759         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1760           T.indicatePessimisticFixpoint();
1761       } else {
1762         // Use abstract attribute information.
1763         const AANonNull::StateType &NS = AA.getState();
1764         T ^= NS;
1765       }
1766       return T.isValidState();
1767     };
1768 
1769     StateType T;
1770     if (!genericValueTraversal<AANonNull, StateType>(
1771             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1772       return indicatePessimisticFixpoint();
1773 
1774     return clampStateAndIndicateChange(getState(), T);
1775   }
1776 
1777   /// See AbstractAttribute::trackStatistics()
1778   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1779 };
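// Illustrative IR: the generic value traversal looks through PHIs and
// selects, so a floating value such as
//
//   %phi = phi i8* [ %a, %bb0 ], [ %b, %bb1 ]
//
// becomes nonnull once both incoming values are known or assumed nonnull.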
1780 
1781 /// NonNull attribute for function return value.
1782 struct AANonNullReturned final
1783     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1784   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1785       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1786 
1787   /// See AbstractAttribute::getAsStr().
1788   const std::string getAsStr() const override {
1789     return getAssumed() ? "nonnull" : "may-null";
1790   }
1791 
1792   /// See AbstractAttribute::trackStatistics()
1793   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1794 };
1795 
1796 /// NonNull attribute for function argument.
1797 struct AANonNullArgument final
1798     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1799   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1800       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1801 
1802   /// See AbstractAttribute::trackStatistics()
1803   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1804 };
1805 
1806 struct AANonNullCallSiteArgument final : AANonNullFloating {
1807   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1808       : AANonNullFloating(IRP, A) {}
1809 
1810   /// See AbstractAttribute::trackStatistics()
1811   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1812 };
1813 
1814 /// NonNull attribute for a call site return position.
1815 struct AANonNullCallSiteReturned final
1816     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1817   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1818       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1819 
1820   /// See AbstractAttribute::trackStatistics()
1821   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1822 };
1823 
1824 /// ------------------------ No-Recurse Attributes ----------------------------
1825 
1826 struct AANoRecurseImpl : public AANoRecurse {
1827   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1828 
1829   /// See AbstractAttribute::getAsStr()
1830   const std::string getAsStr() const override {
1831     return getAssumed() ? "norecurse" : "may-recurse";
1832   }
1833 };
1834 
1835 struct AANoRecurseFunction final : AANoRecurseImpl {
1836   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1837       : AANoRecurseImpl(IRP, A) {}
1838 
1839   /// See AbstractAttribute::initialize(...).
1840   void initialize(Attributor &A) override {
1841     AANoRecurseImpl::initialize(A);
1842     if (const Function *F = getAnchorScope())
1843       if (A.getInfoCache().getSccSize(*F) != 1)
1844         indicatePessimisticFixpoint();
1845   }
1846 
1847   /// See AbstractAttribute::updateImpl(...).
1848   ChangeStatus updateImpl(Attributor &A) override {
1849 
1850     // If all live call sites are known to be no-recurse, we are as well.
1851     auto CallSitePred = [&](AbstractCallSite ACS) {
1852       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1853           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1854           DepClassTy::NONE);
1855       return NoRecurseAA.isKnownNoRecurse();
1856     };
1857     bool AllCallSitesKnown;
1858     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1859       // If we know all call sites and all are known no-recurse, we are done.
1860       // If all known call sites, which might not be all that exist, are known
1861       // to be no-recurse, we are not done but we can continue to assume
1862       // no-recurse. If one of the call sites we have not visited will become
1863       // live, another update is triggered.
1864       if (AllCallSitesKnown)
1865         indicateOptimisticFixpoint();
1866       return ChangeStatus::UNCHANGED;
1867     }
1868 
1869     // If the above check does not hold anymore we look at the calls.
1870     auto CheckForNoRecurse = [&](Instruction &I) {
1871       const auto &CB = cast<CallBase>(I);
1872       if (CB.hasFnAttr(Attribute::NoRecurse))
1873         return true;
1874 
1875       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1876           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1877       if (!NoRecurseAA.isAssumedNoRecurse())
1878         return false;
1879 
1880       // Recursion to the same function
1881       if (CB.getCalledFunction() == getAnchorScope())
1882         return false;
1883 
1884       return true;
1885     };
1886 
1887     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1888       return indicatePessimisticFixpoint();
1889     return ChangeStatus::UNCHANGED;
1890   }
1891 
1892   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1893 };
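// Illustrative example: a directly self-recursive function can never become
// norecurse because the call-site check above rejects calls back into the
// anchor scope:
//
//   define void @self() {
//     call void @self()
//     ret void
//   }
//
// Mutual recursion is rejected even earlier, in initialize(), since all
// functions involved share a non-trivial SCC.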
1894 
1895 /// NoRecurse attribute deduction for a call site.
1896 struct AANoRecurseCallSite final : AANoRecurseImpl {
1897   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1898       : AANoRecurseImpl(IRP, A) {}
1899 
1900   /// See AbstractAttribute::initialize(...).
1901   void initialize(Attributor &A) override {
1902     AANoRecurseImpl::initialize(A);
1903     Function *F = getAssociatedFunction();
1904     if (!F || F->isDeclaration())
1905       indicatePessimisticFixpoint();
1906   }
1907 
1908   /// See AbstractAttribute::updateImpl(...).
1909   ChangeStatus updateImpl(Attributor &A) override {
1910     // TODO: Once we have call site specific value information we can provide
1911     //       call site specific liveness information and then it makes
1912     //       sense to specialize attributes for call sites arguments instead of
1913     //       redirecting requests to the callee argument.
1914     Function *F = getAssociatedFunction();
1915     const IRPosition &FnPos = IRPosition::function(*F);
1916     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1917     return clampStateAndIndicateChange(getState(), FnAA.getState());
1918   }
1919 
1920   /// See AbstractAttribute::trackStatistics()
1921   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1922 };
1923 
1924 /// -------------------- Undefined-Behavior Attributes ------------------------
1925 
1926 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1927   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1928       : AAUndefinedBehavior(IRP, A) {}
1929 
1930   /// See AbstractAttribute::updateImpl(...).
1931   // UB is tracked for accesses through a pointer, branches, calls, and returns.
1932   ChangeStatus updateImpl(Attributor &A) override {
1933     const size_t UBPrevSize = KnownUBInsts.size();
1934     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1935 
1936     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1937       // Skip instructions that are already saved.
1938       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1939         return true;
1940 
1941       // If we reach here, we know we have an instruction
1942       // that accesses memory through a pointer operand,
1943       // for which getPointerOperand() should give it to us.
1944       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1945       assert(PtrOp &&
1946              "Expected pointer operand of memory accessing instruction");
1947 
1948       // Either we stopped and the appropriate action was taken,
1949       // or we got back a simplified value to continue.
1950       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1951       if (!SimplifiedPtrOp.hasValue())
1952         return true;
1953       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1954 
1955       // A memory access through a pointer is considered UB
1956       // only if the pointer is a constant null value.
1957       // TODO: Expand it to not only check constant values.
1958       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1959         AssumedNoUBInsts.insert(&I);
1960         return true;
1961       }
1962       const Type *PtrTy = PtrOpVal->getType();
1963 
1964       // Because we only consider instructions inside functions,
1965       // assume that a parent function exists.
1966       const Function *F = I.getFunction();
1967 
1968       // A memory access using constant null pointer is only considered UB
1969       // if null pointer is _not_ defined for the target platform.
1970       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1971         AssumedNoUBInsts.insert(&I);
1972       else
1973         KnownUBInsts.insert(&I);
1974       return true;
1975     };
1976 
1977     auto InspectBrInstForUB = [&](Instruction &I) {
1978       // A conditional branch instruction is considered UB if it has `undef`
1979       // condition.
1980 
1981       // Skip instructions that are already saved.
1982       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1983         return true;
1984 
1985       // We know we have a branch instruction.
1986       auto BrInst = cast<BranchInst>(&I);
1987 
1988       // Unconditional branches are never considered UB.
1989       if (BrInst->isUnconditional())
1990         return true;
1991 
1992       // Either we stopped and the appropriate action was taken,
1993       // or we got back a simplified value to continue.
1994       Optional<Value *> SimplifiedCond =
1995           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1996       if (!SimplifiedCond.hasValue())
1997         return true;
1998       AssumedNoUBInsts.insert(&I);
1999       return true;
2000     };
2001 
2002     auto InspectCallSiteForUB = [&](Instruction &I) {
2003       // Check whether a call site always causes UB or not.
2004 
2005       // Skip instructions that are already saved.
2006       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2007         return true;
2008 
2009       // Check nonnull and noundef argument attribute violation for each
2010       // callsite.
2011       CallBase &CB = cast<CallBase>(I);
2012       Function *Callee = CB.getCalledFunction();
2013       if (!Callee)
2014         return true;
2015       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2016         // If the current argument is known to be simplified to a null pointer
2017         // and the corresponding argument position is known to have the nonnull
2018         // attribute, the argument is poison. Furthermore, if the argument is
2019         // poison and the position is known to have the noundef attribute, this
2020         // call site is considered UB.
2021         if (idx >= Callee->arg_size())
2022           break;
2023         Value *ArgVal = CB.getArgOperand(idx);
2024         if (!ArgVal)
2025           continue;
2026         // Here, we handle three cases.
2027         //   (1) Not having a value means it is dead. (we can replace the value
2028         //       with undef)
2029         //   (2) Simplified to undef. The argument violates the noundef attribute.
2030         //   (3) Simplified to a null pointer where known to be nonnull.
2031         //       The argument is a poison value and violates the noundef attribute.
2032         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2033         auto &NoUndefAA =
2034             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2035         if (!NoUndefAA.isKnownNoUndef())
2036           continue;
2037         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2038             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2039         if (!ValueSimplifyAA.isKnown())
2040           continue;
2041         Optional<Value *> SimplifiedVal =
2042             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2043         if (!SimplifiedVal.hasValue() ||
2044             isa<UndefValue>(*SimplifiedVal.getValue())) {
2045           KnownUBInsts.insert(&I);
2046           continue;
2047         }
2048         if (!ArgVal->getType()->isPointerTy() ||
2049             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2050           continue;
2051         auto &NonNullAA =
2052             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2053         if (NonNullAA.isKnownNonNull())
2054           KnownUBInsts.insert(&I);
2055       }
2056       return true;
2057     };
2058 
2059     auto InspectReturnInstForUB =
2060         [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
2061           // Check if a return instruction always causes UB or not.
2062           // Note: It is guaranteed that the returned position of the anchor
2063           //       scope has noundef attribute when this is called.
2064           //       We also ensure the return position is not "assumed dead"
2065           //       because the returned value was then potentially simplified to
2066           //       `undef` in AAReturnedValues without removing the `noundef`
2067           //       attribute yet.
2068 
2069           // When the returned position has the noundef attribute, UB occurs in
2070           // the following cases.
2071           //   (1) Returned value is known to be undef.
2072           //   (2) The value is known to be a null pointer and the returned
2073           //       position has nonnull attribute (because the returned value is
2074           //       poison).
2075           bool FoundUB = false;
2076           if (isa<UndefValue>(V)) {
2077             FoundUB = true;
2078           } else {
2079             if (isa<ConstantPointerNull>(V)) {
2080               auto &NonNullAA = A.getAAFor<AANonNull>(
2081                   *this, IRPosition::returned(*getAnchorScope()),
2082                   DepClassTy::NONE);
2083               if (NonNullAA.isKnownNonNull())
2084                 FoundUB = true;
2085             }
2086           }
2087 
2088           if (FoundUB)
2089             for (ReturnInst *RI : RetInsts)
2090               KnownUBInsts.insert(RI);
2091           return true;
2092         };
2093 
2094     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2095                               {Instruction::Load, Instruction::Store,
2096                                Instruction::AtomicCmpXchg,
2097                                Instruction::AtomicRMW},
2098                               /* CheckBBLivenessOnly */ true);
2099     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2100                               /* CheckBBLivenessOnly */ true);
2101     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2102 
2103     // If the returned position of the anchor scope has the noundef attribute,
2104     // check all return instructions.
2105     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2106       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2107       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2108         auto &RetPosNoUndefAA =
2109             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2110         if (RetPosNoUndefAA.isKnownNoUndef())
2111           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2112                                                     *this);
2113       }
2114     }
2115 
2116     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2117         UBPrevSize != KnownUBInsts.size())
2118       return ChangeStatus::CHANGED;
2119     return ChangeStatus::UNCHANGED;
2120   }
2121 
2122   bool isKnownToCauseUB(Instruction *I) const override {
2123     return KnownUBInsts.count(I);
2124   }
2125 
2126   bool isAssumedToCauseUB(Instruction *I) const override {
2127     // In simple words, if an instruction is not in the set of instructions
2128     // assumed to _not_ cause UB, then it is assumed to cause UB (that
2129     // includes those in the KnownUBInsts set). The rest of the boilerplate
2130     // ensures that the instruction is one of the kinds we actually test
2131     // for UB.
2132 
2133     switch (I->getOpcode()) {
2134     case Instruction::Load:
2135     case Instruction::Store:
2136     case Instruction::AtomicCmpXchg:
2137     case Instruction::AtomicRMW:
2138       return !AssumedNoUBInsts.count(I);
2139     case Instruction::Br: {
2140       auto BrInst = cast<BranchInst>(I);
2141       if (BrInst->isUnconditional())
2142         return false;
2143       return !AssumedNoUBInsts.count(I);
2144     } break;
2145     default:
2146       return false;
2147     }
2148     return false;
2149   }
2150 
2151   ChangeStatus manifest(Attributor &A) override {
2152     if (KnownUBInsts.empty())
2153       return ChangeStatus::UNCHANGED;
2154     for (Instruction *I : KnownUBInsts)
2155       A.changeToUnreachableAfterManifest(I);
2156     return ChangeStatus::CHANGED;
2157   }
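// Illustrative IR: if null is not a defined pointer in the address space, the
// store below ends up in KnownUBInsts and manifest() replaces the program
// point with `unreachable`:
//
//   store i32 42, i32* null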
2158 
2159   /// See AbstractAttribute::getAsStr()
2160   const std::string getAsStr() const override {
2161     return getAssumed() ? "undefined-behavior" : "no-ub";
2162   }
2163 
2164   /// Note: The correctness of this analysis depends on the fact that the
2165   /// following 2 sets will stop changing after some point.
2166   /// "Change" here means that their size changes.
2167   /// The size of each set is monotonically increasing
2168   /// (we only add items to them) and it is upper bounded by the number of
2169   /// instructions in the processed function (we can never save more
2170   /// elements in either set than this number). Hence, at some point,
2171   /// they will stop increasing.
2172   /// Consequently, at some point, both sets will have stopped
2173   /// changing, effectively making the analysis reach a fixpoint.
2174 
2175   /// Note: These 2 sets are disjoint and an instruction can be considered
2176   /// one of 3 things:
2177   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2178   ///    the KnownUBInsts set.
2179   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2180   ///    has a reason to assume it).
2181   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2182   ///    could not find a reason to assume or prove that it can cause UB,
2183   ///    hence it assumes it doesn't. We have a set for these instructions
2184   ///    so that we don't reprocess them in every update.
2185   ///    Note however that instructions in this set may cause UB.
2186 
2187 protected:
2188   /// A set of all live instructions _known_ to cause UB.
2189   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2190 
2191 private:
2192   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2193   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2194 
2195   // Should be called during an update when we are processing an instruction
2196   // \p I that depends on a value \p V; one of the following has to happen:
2197   // - If the value is assumed, then stop.
2198   // - If the value is known but undef, then consider it UB.
2199   // - Otherwise, do specific processing with the simplified value.
2200   // We return None in the first 2 cases to signify that an appropriate
2201   // action was taken and the caller should stop.
2202   // Otherwise, we return the simplified value that the caller should
2203   // use for specific processing.
2204   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2205                                          Instruction *I) {
2206     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2207         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2208     Optional<Value *> SimplifiedV =
2209         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2210     if (!ValueSimplifyAA.isKnown()) {
2211       // Don't depend on assumed values.
2212       return llvm::None;
2213     }
2214     if (!SimplifiedV.hasValue()) {
2215       // If it is known (which we tested above) but it doesn't have a value,
2216       // then we can assume `undef` and hence the instruction is UB.
2217       KnownUBInsts.insert(I);
2218       return llvm::None;
2219     }
2220     Value *Val = SimplifiedV.getValue();
2221     if (isa<UndefValue>(Val)) {
2222       KnownUBInsts.insert(I);
2223       return llvm::None;
2224     }
2225     return Val;
2226   }
2227 };
2228 
2229 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2230   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2231       : AAUndefinedBehaviorImpl(IRP, A) {}
2232 
2233   /// See AbstractAttribute::trackStatistics()
2234   void trackStatistics() const override {
2235     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2236                "Number of instructions known to have UB");
2237     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2238         KnownUBInsts.size();
2239   }
2240 };
2241 
2242 /// ------------------------ Will-Return Attributes ----------------------------
2243 
2244 // Helper function that checks whether a function has any cycle that is not
2245 // known to be bounded.
2246 // Loops with a maximum trip count are considered bounded; any other cycle is not.
2247 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2248   ScalarEvolution *SE =
2249       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2250   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2251   // If either SCEV or LoopInfo is not available for the function then we assume
2252   // any cycle to be unbounded cycle.
2253 // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2254 // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2255   if (!SE || !LI) {
2256     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2257       if (SCCI.hasCycle())
2258         return true;
2259     return false;
2260   }
2261 
2262   // If there's irreducible control, the function may contain non-loop cycles.
2263   if (mayContainIrreducibleControl(F, LI))
2264     return true;
2265 
2266   // Any loop that does not have a max trip count is considered unbounded cycle.
2267   for (auto *L : LI->getLoopsInPreorder()) {
2268     if (!SE->getSmallConstantMaxTripCount(L))
2269       return true;
2270   }
2271   return false;
2272 }
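// Illustrative example: a counted loop such as `for (i = 0; i != 16; ++i)`
// has a constant maximum trip count computed by SCEV and counts as bounded,
// whereas `while (*p) {}` typically has no computable maximum trip count and
// makes this helper return true.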
2273 
2274 struct AAWillReturnImpl : public AAWillReturn {
2275   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2276       : AAWillReturn(IRP, A) {}
2277 
2278   /// See AbstractAttribute::initialize(...).
2279   void initialize(Attributor &A) override {
2280     AAWillReturn::initialize(A);
2281 
2282     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2283       indicateOptimisticFixpoint();
2284       return;
2285     }
2286   }
2287 
2288   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2289   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2290     // Check for `mustprogress` in the scope and the associated function which
2291     // might be different if this is a call site.
2292     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2293         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2294       return false;
2295 
2296     const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2297                                                       DepClassTy::NONE);
2298     if (!MemAA.isAssumedReadOnly())
2299       return false;
2300     if (KnownOnly && !MemAA.isKnownReadOnly())
2301       return false;
2302     if (!MemAA.isKnownReadOnly())
2303       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2304 
2305     return true;
2306   }
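// Illustrative IR: the check above lets a function like
//
//   define i32 @g(i32* %p) mustprogress readonly { ... }
//
// be treated as willreturn: mustprogress rules out unbounded execution
// without progress, and readonly rules out the observable effects that could
// otherwise constitute progress.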
2307 
2308   /// See AbstractAttribute::updateImpl(...).
2309   ChangeStatus updateImpl(Attributor &A) override {
2310     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2311       return ChangeStatus::UNCHANGED;
2312 
2313     auto CheckForWillReturn = [&](Instruction &I) {
2314       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2315       const auto &WillReturnAA =
2316           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2317       if (WillReturnAA.isKnownWillReturn())
2318         return true;
2319       if (!WillReturnAA.isAssumedWillReturn())
2320         return false;
2321       const auto &NoRecurseAA =
2322           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2323       return NoRecurseAA.isAssumedNoRecurse();
2324     };
2325 
2326     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2327       return indicatePessimisticFixpoint();
2328 
2329     return ChangeStatus::UNCHANGED;
2330   }
2331 
2332   /// See AbstractAttribute::getAsStr()
2333   const std::string getAsStr() const override {
2334     return getAssumed() ? "willreturn" : "may-noreturn";
2335   }
2336 };
2337 
2338 struct AAWillReturnFunction final : AAWillReturnImpl {
2339   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2340       : AAWillReturnImpl(IRP, A) {}
2341 
2342   /// See AbstractAttribute::initialize(...).
2343   void initialize(Attributor &A) override {
2344     AAWillReturnImpl::initialize(A);
2345 
2346     Function *F = getAnchorScope();
2347     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2348       indicatePessimisticFixpoint();
2349   }
2350 
2351   /// See AbstractAttribute::trackStatistics()
2352   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2353 };
2354 
2355 /// WillReturn attribute deduction for a call site.
2356 struct AAWillReturnCallSite final : AAWillReturnImpl {
2357   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2358       : AAWillReturnImpl(IRP, A) {}
2359 
2360   /// See AbstractAttribute::initialize(...).
2361   void initialize(Attributor &A) override {
2362     AAWillReturnImpl::initialize(A);
2363     Function *F = getAssociatedFunction();
2364     if (!F || !A.isFunctionIPOAmendable(*F))
2365       indicatePessimisticFixpoint();
2366   }
2367 
2368   /// See AbstractAttribute::updateImpl(...).
2369   ChangeStatus updateImpl(Attributor &A) override {
2370     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2371       return ChangeStatus::UNCHANGED;
2372 
2373     // TODO: Once we have call site specific value information we can provide
2374     //       call site specific liveness information and then it makes
2375     //       sense to specialize attributes for call sites arguments instead of
2376     //       redirecting requests to the callee argument.
2377     Function *F = getAssociatedFunction();
2378     const IRPosition &FnPos = IRPosition::function(*F);
2379     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2380     return clampStateAndIndicateChange(getState(), FnAA.getState());
2381   }
2382 
2383   /// See AbstractAttribute::trackStatistics()
2384   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2385 };
2386 
2387 /// -------------------AAReachability Attribute--------------------------
2388 
2389 struct AAReachabilityImpl : AAReachability {
2390   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2391       : AAReachability(IRP, A) {}
2392 
2393   const std::string getAsStr() const override {
2394     // TODO: Return the number of reachable queries.
2395     return "reachable";
2396   }
2397 
2398   /// See AbstractAttribute::initialize(...).
2399   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2400 
2401   /// See AbstractAttribute::updateImpl(...).
2402   ChangeStatus updateImpl(Attributor &A) override {
2403     return indicatePessimisticFixpoint();
2404   }
2405 };
2406 
2407 struct AAReachabilityFunction final : public AAReachabilityImpl {
2408   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2409       : AAReachabilityImpl(IRP, A) {}
2410 
2411   /// See AbstractAttribute::trackStatistics()
2412   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2413 };
2414 
2415 /// ------------------------ NoAlias Argument Attribute ------------------------
2416 
2417 struct AANoAliasImpl : AANoAlias {
2418   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2419     assert(getAssociatedType()->isPointerTy() &&
2420            "Noalias is a pointer attribute");
2421   }
2422 
2423   const std::string getAsStr() const override {
2424     return getAssumed() ? "noalias" : "may-alias";
2425   }
2426 };
2427 
2428 /// NoAlias attribute for a floating value.
2429 struct AANoAliasFloating final : AANoAliasImpl {
2430   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2431       : AANoAliasImpl(IRP, A) {}
2432 
2433   /// See AbstractAttribute::initialize(...).
2434   void initialize(Attributor &A) override {
2435     AANoAliasImpl::initialize(A);
2436     Value *Val = &getAssociatedValue();
2437     do {
2438       CastInst *CI = dyn_cast<CastInst>(Val);
2439       if (!CI)
2440         break;
2441       Value *Base = CI->getOperand(0);
2442       if (!Base->hasOneUse())
2443         break;
2444       Val = Base;
2445     } while (true);
2446 
2447     if (!Val->getType()->isPointerTy()) {
2448       indicatePessimisticFixpoint();
2449       return;
2450     }
2451 
2452     if (isa<AllocaInst>(Val))
2453       indicateOptimisticFixpoint();
2454     else if (isa<ConstantPointerNull>(Val) &&
2455              !NullPointerIsDefined(getAnchorScope(),
2456                                    Val->getType()->getPointerAddressSpace()))
2457       indicateOptimisticFixpoint();
2458     else if (Val != &getAssociatedValue()) {
2459       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2460           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2461       if (ValNoAliasAA.isKnownNoAlias())
2462         indicateOptimisticFixpoint();
2463     }
2464   }
2465 
2466   /// See AbstractAttribute::updateImpl(...).
2467   ChangeStatus updateImpl(Attributor &A) override {
2468     // TODO: Implement this.
2469     return indicatePessimisticFixpoint();
2470   }
2471 
2472   /// See AbstractAttribute::trackStatistics()
2473   void trackStatistics() const override {
2474     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2475   }
2476 };
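// Illustrative IR: the single-use cast stripping in initialize() lets a cast
// of a fresh allocation inherit noalias:
//
//   %a = alloca i64               ; %a has exactly one use
//   %c = bitcast i64* %a to i8*   ; %c is recognized as noalias via %a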
2477 
2478 /// NoAlias attribute for an argument.
2479 struct AANoAliasArgument final
2480     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2481   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2482   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2483 
2484   /// See AbstractAttribute::initialize(...).
2485   void initialize(Attributor &A) override {
2486     Base::initialize(A);
2487     // See callsite argument attribute and callee argument attribute.
2488     if (hasAttr({Attribute::ByVal}))
2489       indicateOptimisticFixpoint();
2490   }
2491 
2492   /// See AbstractAttribute::update(...).
2493   ChangeStatus updateImpl(Attributor &A) override {
2494     // We have to make sure no-alias on the argument does not break
2495     // synchronization when this is a callback argument, see also [1] below.
2496     // If synchronization cannot be affected, we delegate to the base updateImpl
2497     // function, otherwise we give up for now.
2498 
2499     // If the function is no-sync, no-alias cannot break synchronization.
2500     const auto &NoSyncAA =
2501         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2502                              DepClassTy::OPTIONAL);
2503     if (NoSyncAA.isAssumedNoSync())
2504       return Base::updateImpl(A);
2505 
2506     // If the argument is read-only, no-alias cannot break synchronization.
2507     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2508         *this, getIRPosition(), DepClassTy::OPTIONAL);
2509     if (MemBehaviorAA.isAssumedReadOnly())
2510       return Base::updateImpl(A);
2511 
2512     // If the argument is never passed through callbacks, no-alias cannot break
2513     // synchronization.
2514     bool AllCallSitesKnown;
2515     if (A.checkForAllCallSites(
2516             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2517             true, AllCallSitesKnown))
2518       return Base::updateImpl(A);
2519 
2520     // TODO: add no-alias but make sure it doesn't break synchronization by
2521     // introducing fake uses. See:
2522     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2523     //     International Workshop on OpenMP 2018,
2524     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2525 
2526     return indicatePessimisticFixpoint();
2527   }
2528 
2529   /// See AbstractAttribute::trackStatistics()
2530   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2531 };
2532 
2533 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2534   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2535       : AANoAliasImpl(IRP, A) {}
2536 
2537   /// See AbstractAttribute::initialize(...).
2538   void initialize(Attributor &A) override {
2539     // See callsite argument attribute and callee argument attribute.
2540     const auto &CB = cast<CallBase>(getAnchorValue());
2541     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2542       indicateOptimisticFixpoint();
2543     Value &Val = getAssociatedValue();
2544     if (isa<ConstantPointerNull>(Val) &&
2545         !NullPointerIsDefined(getAnchorScope(),
2546                               Val.getType()->getPointerAddressSpace()))
2547       indicateOptimisticFixpoint();
2548   }
2549 
2550   /// Determine if the underlying value may alias with the call site argument
2551   /// \p OtherArgNo of \p ICS (= the underlying call site).
2552   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2553                             const AAMemoryBehavior &MemBehaviorAA,
2554                             const CallBase &CB, unsigned OtherArgNo) {
2555     // We do not need to worry about aliasing with the underlying IRP.
2556     if (this->getCalleeArgNo() == (int)OtherArgNo)
2557       return false;
2558 
2559     // If it is not a pointer or pointer vector we do not alias.
2560     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2561     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2562       return false;
2563 
2564     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2565         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2566 
2567     // If the argument is readnone, there is no read-write aliasing.
2568     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2569       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2570       return false;
2571     }
2572 
2573     // If the argument is readonly and the underlying value is readonly, there
2574     // is no read-write aliasing.
2575     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2576     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2577       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2578       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2579       return false;
2580     }
2581 
2582     // We have to utilize actual alias analysis queries so we need the object.
2583     if (!AAR)
2584       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2585 
2586     // Try to rule it out at the call site.
2587     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2588     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2589                          "callsite arguments: "
2590                       << getAssociatedValue() << " " << *ArgOp << " => "
2591                       << (IsAliasing ? "" : "no-") << "alias\n");
2592 
2593     return IsAliasing;
2594   }
2595 
2596   bool
2597   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2598                                          const AAMemoryBehavior &MemBehaviorAA,
2599                                          const AANoAlias &NoAliasAA) {
2600     // We can deduce "noalias" if the following conditions hold.
2601     // (i)   Associated value is assumed to be noalias in the definition.
2602     // (ii)  Associated value is assumed to be no-capture in all the uses
2603     //       possibly executed before this callsite.
2604     // (iii) There is no other pointer argument which could alias with the
2605     //       value.
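    //
    // Illustrative IR: for
    //   %p = call noalias i8* @malloc(i64 8)
    //   call void @use(i8* %p)
    // the argument %p at the call to @use satisfies (i) via the noalias
    // return of @malloc; (ii) and (iii) are verified below.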
2606 
2607     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2608     if (!AssociatedValueIsNoAliasAtDef) {
2609       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2610                         << " is not no-alias at the definition\n");
2611       return false;
2612     }
2613 
2614     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2615 
2616     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2617     const Function *ScopeFn = VIRP.getAnchorScope();
2618     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2619     // Check whether the value is captured in the scope using AANoCapture.
2620     //      Look at CFG and check only uses possibly executed before this
2621     //      callsite.
2622     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2623       Instruction *UserI = cast<Instruction>(U.getUser());
2624 
2625       // If UserI is the current instruction and there is a single potential
2626       // use of the value in UserI, we allow the use.
2627       // TODO: We should inspect the operands and allow those that cannot alias
2628       //       with the value.
2629       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2630         return true;
2631 
2632       if (ScopeFn) {
2633         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2634             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2635 
2636         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2637           return true;
2638 
2639         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2640           if (CB->isArgOperand(&U)) {
2641 
2642             unsigned ArgNo = CB->getArgOperandNo(&U);
2643 
2644             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2645                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2646                 DepClassTy::OPTIONAL);
2647 
2648             if (NoCaptureAA.isAssumedNoCapture())
2649               return true;
2650           }
2651         }
2652       }
2653 
2654       // For cases which can potentially have more users, follow the uses.
2655       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2656           isa<SelectInst>(U)) {
2657         Follow = true;
2658         return true;
2659       }
2660 
2661       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2662       return false;
2663     };
2664 
2665     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2666       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2667         LLVM_DEBUG(
2668             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2669                    << " cannot be noalias as it is potentially captured\n");
2670         return false;
2671       }
2672     }
2673     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2674 
2675     // Check there is no other pointer argument which could alias with the
2676     // value passed at this call site.
2677     // TODO: AbstractCallSite
2678     const auto &CB = cast<CallBase>(getAnchorValue());
2679     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2680          OtherArgNo++)
2681       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2682         return false;
2683 
2684     return true;
2685   }
2686 
2687   /// See AbstractAttribute::updateImpl(...).
2688   ChangeStatus updateImpl(Attributor &A) override {
2689     // If the argument is readnone we are done as there are no accesses via the
2690     // argument.
2691     auto &MemBehaviorAA =
2692         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2693     if (MemBehaviorAA.isAssumedReadNone()) {
2694       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2695       return ChangeStatus::UNCHANGED;
2696     }
2697 
2698     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2699     const auto &NoAliasAA =
2700         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2701 
2702     AAResults *AAR = nullptr;
2703     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2704                                                NoAliasAA)) {
2705       LLVM_DEBUG(
2706           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2707       return ChangeStatus::UNCHANGED;
2708     }
2709 
2710     return indicatePessimisticFixpoint();
2711   }
2712 
2713   /// See AbstractAttribute::trackStatistics()
2714   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2715 };
2716 
2717 /// NoAlias attribute for function return value.
2718 struct AANoAliasReturned final : AANoAliasImpl {
2719   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2720       : AANoAliasImpl(IRP, A) {}
2721 
2722   /// See AbstractAttribute::initialize(...).
2723   void initialize(Attributor &A) override {
2724     AANoAliasImpl::initialize(A);
2725     Function *F = getAssociatedFunction();
2726     if (!F || F->isDeclaration())
2727       indicatePessimisticFixpoint();
2728   }
2729 
2730   /// See AbstractAttribute::updateImpl(...).
2731   ChangeStatus updateImpl(Attributor &A) override {
2732 
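    // Illustrative: "ret i8* null" (or undef) is trivially noalias; for
    //   %rv = call i8* @f()
    //   ret i8* %rv
    // we require %rv to be assumed noalias and nocapture-maybe-returned.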
2733     auto CheckReturnValue = [&](Value &RV) -> bool {
2734       if (Constant *C = dyn_cast<Constant>(&RV))
2735         if (C->isNullValue() || isa<UndefValue>(C))
2736           return true;
2737 
2738       /// For now, we can only deduce noalias if we have call sites.
2739       /// FIXME: add more support.
2740       if (!isa<CallBase>(&RV))
2741         return false;
2742 
2743       const IRPosition &RVPos = IRPosition::value(RV);
2744       const auto &NoAliasAA =
2745           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2746       if (!NoAliasAA.isAssumedNoAlias())
2747         return false;
2748 
2749       const auto &NoCaptureAA =
2750           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2751       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2752     };
2753 
2754     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2755       return indicatePessimisticFixpoint();
2756 
2757     return ChangeStatus::UNCHANGED;
2758   }
2759 
2760   /// See AbstractAttribute::trackStatistics()
2761   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2762 };
2763 
2764 /// NoAlias attribute deduction for a call site return value.
2765 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2766   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2767       : AANoAliasImpl(IRP, A) {}
2768 
2769   /// See AbstractAttribute::initialize(...).
2770   void initialize(Attributor &A) override {
2771     AANoAliasImpl::initialize(A);
2772     Function *F = getAssociatedFunction();
2773     if (!F || F->isDeclaration())
2774       indicatePessimisticFixpoint();
2775   }
2776 
2777   /// See AbstractAttribute::updateImpl(...).
2778   ChangeStatus updateImpl(Attributor &A) override {
2779     // TODO: Once we have call site specific value information we can provide
2780     //       call site specific liveness information and then it makes
2781     //       sense to specialize attributes for call sites arguments instead of
2782     //       redirecting requests to the callee argument.
2783     Function *F = getAssociatedFunction();
2784     const IRPosition &FnPos = IRPosition::returned(*F);
2785     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2786     return clampStateAndIndicateChange(getState(), FnAA.getState());
2787   }
2788 
2789   /// See AbstractAttribute::trackStatistics()
2790   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2791 };
2792 
2793 /// -------------------AAIsDead Function Attribute-----------------------
2794 
2795 struct AAIsDeadValueImpl : public AAIsDead {
2796   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2797 
2798   /// See AAIsDead::isAssumedDead().
2799   bool isAssumedDead() const override { return getAssumed(); }
2800 
2801   /// See AAIsDead::isKnownDead().
2802   bool isKnownDead() const override { return getKnown(); }
2803 
2804   /// See AAIsDead::isAssumedDead(BasicBlock *).
2805   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2806 
2807   /// See AAIsDead::isKnownDead(BasicBlock *).
2808   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2809 
2810   /// See AAIsDead::isAssumedDead(Instruction *I).
2811   bool isAssumedDead(const Instruction *I) const override {
2812     return I == getCtxI() && isAssumedDead();
2813   }
2814 
2815   /// See AAIsDead::isKnownDead(Instruction *I).
2816   bool isKnownDead(const Instruction *I) const override {
2817     return isAssumedDead(I) && getKnown();
2818   }
2819 
2820   /// See AbstractAttribute::getAsStr().
2821   const std::string getAsStr() const override {
2822     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2823   }
2824 
2825   /// Check if all uses are assumed dead.
2826   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2827     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2828     // Explicitly set the dependence class to required because we want a long
2829     // chain of N dependent instructions to be considered live as soon as one of
2830     // them is, without going through N update cycles. This is not required for
2831     // correctness.
2832     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2833   }
2834 
2835   /// Determine if \p I is assumed to be side-effect free.
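  /// Trivially dead instructions count as side-effect free; a call is treated
  /// as side-effect free only if it is assumed nounwind and at most read-only
  /// (see the checks below).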
2836   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2837     if (!I || wouldInstructionBeTriviallyDead(I))
2838       return true;
2839 
2840     auto *CB = dyn_cast<CallBase>(I);
2841     if (!CB || isa<IntrinsicInst>(CB))
2842       return false;
2843 
2844     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2845     const auto &NoUnwindAA =
2846         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2847     if (!NoUnwindAA.isAssumedNoUnwind())
2848       return false;
2849     if (!NoUnwindAA.isKnownNoUnwind())
2850       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2851 
2852     const auto &MemBehaviorAA =
2853         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2854     if (MemBehaviorAA.isAssumedReadOnly()) {
2855       if (!MemBehaviorAA.isKnownReadOnly())
2856         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2857       return true;
2858     }
2859     return false;
2860   }
2861 };
2862 
2863 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2864   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2865       : AAIsDeadValueImpl(IRP, A) {}
2866 
2867   /// See AbstractAttribute::initialize(...).
2868   void initialize(Attributor &A) override {
2869     if (isa<UndefValue>(getAssociatedValue())) {
2870       indicatePessimisticFixpoint();
2871       return;
2872     }
2873 
2874     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2875     if (!isAssumedSideEffectFree(A, I))
2876       indicatePessimisticFixpoint();
2877   }
2878 
2879   /// See AbstractAttribute::updateImpl(...).
2880   ChangeStatus updateImpl(Attributor &A) override {
2881     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2882     if (!isAssumedSideEffectFree(A, I))
2883       return indicatePessimisticFixpoint();
2884 
2885     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2886       return indicatePessimisticFixpoint();
2887     return ChangeStatus::UNCHANGED;
2888   }
2889 
2890   /// See AbstractAttribute::manifest(...).
2891   ChangeStatus manifest(Attributor &A) override {
2892     Value &V = getAssociatedValue();
2893     if (auto *I = dyn_cast<Instruction>(&V)) {
2894       // If we get here we basically know the users are all dead. We check if
2895       // isAssumedSideEffectFree returns true here again because it might not be
2896       // the case and only the users are dead but the instruction (=call) is
2897       // still needed.
2898       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2899         A.deleteAfterManifest(*I);
2900         return ChangeStatus::CHANGED;
2901       }
2902     }
2903     if (V.use_empty())
2904       return ChangeStatus::UNCHANGED;
2905 
2906     bool UsedAssumedInformation = false;
2907     Optional<Constant *> C =
2908         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2909     if (C.hasValue() && C.getValue())
2910       return ChangeStatus::UNCHANGED;
2911 
2912     // Replace the value with undef as it is dead but keep droppable uses around
2913     // as they provide information we don't want to give up on just yet.
2914     UndefValue &UV = *UndefValue::get(V.getType());
2915     bool AnyChange =
2916         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2917     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2918   }
2919 
2920   /// See AbstractAttribute::trackStatistics()
2921   void trackStatistics() const override {
2922     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2923   }
2924 };
2925 
2926 struct AAIsDeadArgument : public AAIsDeadFloating {
2927   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2928       : AAIsDeadFloating(IRP, A) {}
2929 
2930   /// See AbstractAttribute::initialize(...).
2931   void initialize(Attributor &A) override {
2932     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2933       indicatePessimisticFixpoint();
2934   }
2935 
2936   /// See AbstractAttribute::manifest(...).
2937   ChangeStatus manifest(Attributor &A) override {
2938     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2939     Argument &Arg = *getAssociatedArgument();
2940     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2941       if (A.registerFunctionSignatureRewrite(
2942               Arg, /* ReplacementTypes */ {},
2943               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2944               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2945         Arg.dropDroppableUses();
2946         return ChangeStatus::CHANGED;
2947       }
2948     return Changed;
2949   }
2950 
2951   /// See AbstractAttribute::trackStatistics()
2952   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2953 };
2954 
2955 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2956   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2957       : AAIsDeadValueImpl(IRP, A) {}
2958 
2959   /// See AbstractAttribute::initialize(...).
2960   void initialize(Attributor &A) override {
2961     if (isa<UndefValue>(getAssociatedValue()))
2962       indicatePessimisticFixpoint();
2963   }
2964 
2965   /// See AbstractAttribute::updateImpl(...).
2966   ChangeStatus updateImpl(Attributor &A) override {
2967     // TODO: Once we have call site specific value information we can provide
2968     //       call site specific liveness information and then it makes
2969     //       sense to specialize attributes for call sites arguments instead of
2970     //       redirecting requests to the callee argument.
2971     Argument *Arg = getAssociatedArgument();
2972     if (!Arg)
2973       return indicatePessimisticFixpoint();
2974     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2975     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2976     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2977   }
2978 
2979   /// See AbstractAttribute::manifest(...).
2980   ChangeStatus manifest(Attributor &A) override {
2981     CallBase &CB = cast<CallBase>(getAnchorValue());
2982     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2983     assert(!isa<UndefValue>(U.get()) &&
2984            "Expected undef values to be filtered out!");
2985     UndefValue &UV = *UndefValue::get(U->getType());
2986     if (A.changeUseAfterManifest(U, UV))
2987       return ChangeStatus::CHANGED;
2988     return ChangeStatus::UNCHANGED;
2989   }
2990 
2991   /// See AbstractAttribute::trackStatistics()
2992   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2993 };
2994 
2995 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2996   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2997       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2998 
2999   /// See AAIsDead::isAssumedDead().
3000   bool isAssumedDead() const override {
3001     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3002   }
3003 
3004   /// See AbstractAttribute::initialize(...).
3005   void initialize(Attributor &A) override {
3006     if (isa<UndefValue>(getAssociatedValue())) {
3007       indicatePessimisticFixpoint();
3008       return;
3009     }
3010 
3011     // We track this separately as a secondary state.
3012     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3013   }
3014 
3015   /// See AbstractAttribute::updateImpl(...).
3016   ChangeStatus updateImpl(Attributor &A) override {
3017     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3018     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3019       IsAssumedSideEffectFree = false;
3020       Changed = ChangeStatus::CHANGED;
3021     }
3022 
3023     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3024       return indicatePessimisticFixpoint();
3025     return Changed;
3026   }
3027 
3028   /// See AbstractAttribute::trackStatistics()
3029   void trackStatistics() const override {
3030     if (IsAssumedSideEffectFree)
3031       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3032     else
3033       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3034   }
3035 
3036   /// See AbstractAttribute::getAsStr().
3037   const std::string getAsStr() const override {
3038     return isAssumedDead()
3039                ? "assumed-dead"
3040                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3041   }
3042 
3043 private:
3044   bool IsAssumedSideEffectFree;
3045 };
3046 
3047 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3048   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3049       : AAIsDeadValueImpl(IRP, A) {}
3050 
3051   /// See AbstractAttribute::updateImpl(...).
3052   ChangeStatus updateImpl(Attributor &A) override {
3053 
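    // Presumably, visiting all (live) return instructions with a
    // trivially-true predicate registers a dependence on their liveness, so
    // this attribute is re-evaluated when returns become dead.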
3054     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3055                               {Instruction::Ret});
3056 
3057     auto PredForCallSite = [&](AbstractCallSite ACS) {
3058       if (ACS.isCallbackCall() || !ACS.getInstruction())
3059         return false;
3060       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3061     };
3062 
3063     bool AllCallSitesKnown;
3064     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3065                                 AllCallSitesKnown))
3066       return indicatePessimisticFixpoint();
3067 
3068     return ChangeStatus::UNCHANGED;
3069   }
3070 
3071   /// See AbstractAttribute::manifest(...).
3072   ChangeStatus manifest(Attributor &A) override {
3073     // TODO: Rewrite the signature to return void?
3074     bool AnyChange = false;
3075     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3076     auto RetInstPred = [&](Instruction &I) {
3077       ReturnInst &RI = cast<ReturnInst>(I);
3078       if (!isa<UndefValue>(RI.getReturnValue()))
3079         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3080       return true;
3081     };
3082     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3083     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3084   }
3085 
3086   /// See AbstractAttribute::trackStatistics()
3087   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3088 };
3089 
3090 struct AAIsDeadFunction : public AAIsDead {
3091   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3092 
3093   /// See AbstractAttribute::initialize(...).
3094   void initialize(Attributor &A) override {
3095     const Function *F = getAnchorScope();
3096     if (F && !F->isDeclaration()) {
3097       // We only want to compute liveness once. If the function is not part of
3098       // the SCC, skip it.
3099       if (A.isRunOn(*const_cast<Function *>(F))) {
3100         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3101         assumeLive(A, F->getEntryBlock());
3102       } else {
3103         indicatePessimisticFixpoint();
3104       }
3105     }
3106   }
3107 
3108   /// See AbstractAttribute::getAsStr().
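  /// Produces, e.g., "Live[#BB 3/5][#TBEP 2][#KDE 1]" (illustrative values).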
3109   const std::string getAsStr() const override {
3110     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3111            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3112            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3113            std::to_string(KnownDeadEnds.size()) + "]";
3114   }
3115 
3116   /// See AbstractAttribute::manifest(...).
3117   ChangeStatus manifest(Attributor &A) override {
3118     assert(getState().isValidState() &&
3119            "Attempted to manifest an invalid state!");
3120 
3121     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3122     Function &F = *getAnchorScope();
3123 
3124     if (AssumedLiveBlocks.empty()) {
3125       A.deleteAfterManifest(F);
3126       return ChangeStatus::CHANGED;
3127     }
3128 
3129     // Flag to determine if we can change an invoke to a call assuming the
3130     // callee is nounwind. This is not possible if the function's
3131     // personality allows asynchronous exceptions to be caught.
3132     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3133 
3134     KnownDeadEnds.set_union(ToBeExploredFrom);
3135     for (const Instruction *DeadEndI : KnownDeadEnds) {
3136       auto *CB = dyn_cast<CallBase>(DeadEndI);
3137       if (!CB)
3138         continue;
3139       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3140           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3141       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3142       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3143         continue;
3144 
3145       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3146         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3147       else
3148         A.changeToUnreachableAfterManifest(
3149             const_cast<Instruction *>(DeadEndI->getNextNode()));
3150       HasChanged = ChangeStatus::CHANGED;
3151     }
3152 
3153     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3154     for (BasicBlock &BB : F)
3155       if (!AssumedLiveBlocks.count(&BB)) {
3156         A.deleteAfterManifest(BB);
3157         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3158       }
3159 
3160     return HasChanged;
3161   }
3162 
3163   /// See AbstractAttribute::updateImpl(...).
3164   ChangeStatus updateImpl(Attributor &A) override;
3165 
3166   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3167     return !AssumedLiveEdges.count(std::make_pair(From, To));
3168   }
3169 
3170   /// See AbstractAttribute::trackStatistics()
3171   void trackStatistics() const override {}
3172 
3173   /// Returns true if the function is assumed dead.
3174   bool isAssumedDead() const override { return false; }
3175 
3176   /// See AAIsDead::isKnownDead().
3177   bool isKnownDead() const override { return false; }
3178 
3179   /// See AAIsDead::isAssumedDead(BasicBlock *).
3180   bool isAssumedDead(const BasicBlock *BB) const override {
3181     assert(BB->getParent() == getAnchorScope() &&
3182            "BB must be in the same anchor scope function.");
3183 
3184     if (!getAssumed())
3185       return false;
3186     return !AssumedLiveBlocks.count(BB);
3187   }
3188 
3189   /// See AAIsDead::isKnownDead(BasicBlock *).
3190   bool isKnownDead(const BasicBlock *BB) const override {
3191     return getKnown() && isAssumedDead(BB);
3192   }
3193 
3194   /// See AAIsDead::isAssumed(Instruction *I).
3195   bool isAssumedDead(const Instruction *I) const override {
3196     assert(I->getParent()->getParent() == getAnchorScope() &&
3197            "Instruction must be in the same anchor scope function.");
3198 
3199     if (!getAssumed())
3200       return false;
3201 
3202     // If it is not in AssumedLiveBlocks then it is for sure dead.
3203     // Otherwise, it can still come after a noreturn call in a live block.
3204     if (!AssumedLiveBlocks.count(I->getParent()))
3205       return true;
3206 
3207     // If it is not after a liveness barrier it is live.
3208     const Instruction *PrevI = I->getPrevNode();
3209     while (PrevI) {
3210       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3211         return true;
3212       PrevI = PrevI->getPrevNode();
3213     }
3214     return false;
3215   }
3216 
3217   /// See AAIsDead::isKnownDead(Instruction *I).
3218   bool isKnownDead(const Instruction *I) const override {
3219     return getKnown() && isAssumedDead(I);
3220   }
3221 
3222   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3223   /// that internal functions called from \p BB should now be looked at.
3224   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3225     if (!AssumedLiveBlocks.insert(&BB).second)
3226       return false;
3227 
3228     // We assume that all of BB is (probably) live now and if there are calls to
3229     // internal functions we will assume that those are now live as well. This
3230     // is a performance optimization for blocks with calls to a lot of internal
3231     // functions. It can however cause dead functions to be treated as live.
3232     for (const Instruction &I : BB)
3233       if (const auto *CB = dyn_cast<CallBase>(&I))
3234         if (const Function *F = CB->getCalledFunction())
3235           if (F->hasLocalLinkage())
3236             A.markLiveInternalFunction(*F);
3237     return true;
3238   }
3239 
3240   /// Collection of instructions that need to be explored again, e.g., we
3241   /// did assume they do not transfer control to (one of their) successors.
3242   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3243 
3244   /// Collection of instructions that are known to not transfer control.
3245   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3246 
3247   /// Collection of all assumed live edges
3248   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3249 
3250   /// Collection of all assumed live BasicBlocks.
3251   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3252 };
3253 
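// Each identifyAliveSuccessors overload below returns true if assumed (rather
// than known) information was used, i.e., the result may change in a later
// update; the alive successor instructions are appended to AliveSuccessors.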
3254 static bool
3255 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3256                         AbstractAttribute &AA,
3257                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3258   const IRPosition &IPos = IRPosition::callsite_function(CB);
3259 
3260   const auto &NoReturnAA =
3261       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3262   if (NoReturnAA.isAssumedNoReturn())
3263     return !NoReturnAA.isKnownNoReturn();
3264   if (CB.isTerminator())
3265     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3266   else
3267     AliveSuccessors.push_back(CB.getNextNode());
3268   return false;
3269 }
3270 
3271 static bool
3272 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3273                         AbstractAttribute &AA,
3274                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3275   bool UsedAssumedInformation =
3276       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3277 
3278   // First, determine if we can change an invoke to a call assuming the
3279   // callee is nounwind. This is not possible if the function's
3280   // personality allows asynchronous exceptions to be caught.
3281   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3282     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3283   } else {
3284     const IRPosition &IPos = IRPosition::callsite_function(II);
3285     const auto &AANoUnw =
3286         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3287     if (AANoUnw.isAssumedNoUnwind()) {
3288       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3289     } else {
3290       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3291     }
3292   }
3293   return UsedAssumedInformation;
3294 }
3295 
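// Illustrative: for "br i1 %c, label %T, label %F", an assumed-constant %c
// keeps only the taken successor alive, a still-unknown %c keeps both edges
// dead for now, and a known non-constant %c keeps both successors alive.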
3296 static bool
3297 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3298                         AbstractAttribute &AA,
3299                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3300   bool UsedAssumedInformation = false;
3301   if (BI.getNumSuccessors() == 1) {
3302     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3303   } else {
3304     Optional<ConstantInt *> CI = getAssumedConstantInt(
3305         A, *BI.getCondition(), AA, UsedAssumedInformation);
3306     if (!CI.hasValue()) {
3307       // No value yet, assume both edges are dead.
3308     } else if (CI.getValue()) {
3309       const BasicBlock *SuccBB =
3310           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3311       AliveSuccessors.push_back(&SuccBB->front());
3312     } else {
3313       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3314       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3315       UsedAssumedInformation = false;
3316     }
3317   }
3318   return UsedAssumedInformation;
3319 }
3320 
3321 static bool
3322 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3323                         AbstractAttribute &AA,
3324                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3325   bool UsedAssumedInformation = false;
3326   Optional<ConstantInt *> CI =
3327       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3328   if (!CI.hasValue()) {
3329     // No value yet, assume all edges are dead.
3330   } else if (CI.getValue()) {
3331     for (auto &CaseIt : SI.cases()) {
3332       if (CaseIt.getCaseValue() == CI.getValue()) {
3333         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3334         return UsedAssumedInformation;
3335       }
3336     }
3337     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3338     return UsedAssumedInformation;
3339   } else {
3340     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3341       AliveSuccessors.push_back(&SuccBB->front());
3342   }
3343   return UsedAssumedInformation;
3344 }
3345 
3346 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3347   ChangeStatus Change = ChangeStatus::UNCHANGED;
3348 
3349   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3350                     << getAnchorScope()->size() << "] BBs and "
3351                     << ToBeExploredFrom.size() << " exploration points and "
3352                     << KnownDeadEnds.size() << " known dead ends\n");
3353 
3354   // Copy and clear the list of instructions we need to explore from. It is
3355   // refilled with instructions the next update has to look at.
3356   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3357                                                ToBeExploredFrom.end());
3358   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3359 
3360   SmallVector<const Instruction *, 8> AliveSuccessors;
3361   while (!Worklist.empty()) {
3362     const Instruction *I = Worklist.pop_back_val();
3363     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3364 
3365     // Fast forward for uninteresting instructions. We could look for UB here
3366     // though.
3367     while (!I->isTerminator() && !isa<CallBase>(I)) {
3368       Change = ChangeStatus::CHANGED;
3369       I = I->getNextNode();
3370     }
3371 
3372     AliveSuccessors.clear();
3373 
3374     bool UsedAssumedInformation = false;
3375     switch (I->getOpcode()) {
3376     // TODO: look for (assumed) UB to backwards propagate "deadness".
3377     default:
3378       assert(I->isTerminator() &&
3379              "Expected non-terminators to be handled already!");
3380       for (const BasicBlock *SuccBB : successors(I->getParent()))
3381         AliveSuccessors.push_back(&SuccBB->front());
3382       break;
3383     case Instruction::Call:
3384       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3385                                                        *this, AliveSuccessors);
3386       break;
3387     case Instruction::Invoke:
3388       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3389                                                        *this, AliveSuccessors);
3390       break;
3391     case Instruction::Br:
3392       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3393                                                        *this, AliveSuccessors);
3394       break;
3395     case Instruction::Switch:
3396       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3397                                                        *this, AliveSuccessors);
3398       break;
3399     }
3400 
3401     if (UsedAssumedInformation) {
3402       NewToBeExploredFrom.insert(I);
3403     } else {
3404       Change = ChangeStatus::CHANGED;
3405       if (AliveSuccessors.empty() ||
3406           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3407         KnownDeadEnds.insert(I);
3408     }
3409 
3410     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3411                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3412                       << UsedAssumedInformation << "\n");
3413 
3414     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3415       if (!I->isTerminator()) {
3416         assert(AliveSuccessors.size() == 1 &&
3417                "Non-terminator expected to have a single successor!");
3418         Worklist.push_back(AliveSuccessor);
3419       } else {
3420         // Record the assumed live edge.
3421         AssumedLiveEdges.insert(
3422             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3423         if (assumeLive(A, *AliveSuccessor->getParent()))
3424           Worklist.push_back(AliveSuccessor);
3425       }
3426     }
3427   }
3428 
3429   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3430 
3431   // If we know everything is live there is no need to query for liveness.
3432   // Instead, indicating a pessimistic fixpoint will cause the state to be
3433   // "invalid" and all queries to be answered conservatively without lookups.
3434   // To be in this state we have to (1) finish the exploration, (2) not rule
3435   // any unreachable code dead, and (3) not discover any non-trivial dead
3436   // ends.
3437   if (ToBeExploredFrom.empty() &&
3438       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3439       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3440         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3441       }))
3442     return indicatePessimisticFixpoint();
3443   return Change;
3444 }
3445 
3446 /// Liveness information for a call site.
3447 struct AAIsDeadCallSite final : AAIsDeadFunction {
3448   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3449       : AAIsDeadFunction(IRP, A) {}
3450 
3451   /// See AbstractAttribute::initialize(...).
3452   void initialize(Attributor &A) override {
3453     // TODO: Once we have call site specific value information we can provide
3454     //       call site specific liveness information and then it makes
3455     //       sense to specialize attributes for call sites instead of
3456     //       redirecting requests to the callee.
3457     llvm_unreachable("Abstract attributes for liveness are not "
3458                      "supported for call sites yet!");
3459   }
3460 
3461   /// See AbstractAttribute::updateImpl(...).
3462   ChangeStatus updateImpl(Attributor &A) override {
3463     return indicatePessimisticFixpoint();
3464   }
3465 
3466   /// See AbstractAttribute::trackStatistics()
3467   void trackStatistics() const override {}
3468 };
3469 
3470 /// -------------------- Dereferenceable Argument Attribute --------------------
3471 
3472 template <>
3473 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3474                                                      const DerefState &R) {
3475   ChangeStatus CS0 =
3476       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3477   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3478   return CS0 | CS1;
3479 }
3480 
3481 struct AADereferenceableImpl : AADereferenceable {
3482   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3483       : AADereferenceable(IRP, A) {}
3484   using StateType = DerefState;
3485 
3486   /// See AbstractAttribute::initialize(...).
3487   void initialize(Attributor &A) override {
3488     SmallVector<Attribute, 4> Attrs;
3489     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3490              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3491     for (const Attribute &Attr : Attrs)
3492       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3493 
3494     const IRPosition &IRP = this->getIRPosition();
3495     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3496 
3497     bool CanBeNull, CanBeFreed;
3498     takeKnownDerefBytesMaximum(
3499         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3500             A.getDataLayout(), CanBeNull, CanBeFreed));
3501 
3502     bool IsFnInterface = IRP.isFnInterfaceKind();
3503     Function *FnScope = IRP.getAnchorScope();
3504     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3505       indicatePessimisticFixpoint();
3506       return;
3507     }
3508 
3509     if (Instruction *CtxI = getCtxI())
3510       followUsesInMBEC(*this, A, getState(), *CtxI);
3511   }
3512 
3513   /// See AbstractAttribute::getState()
3514   /// {
3515   StateType &getState() override { return *this; }
3516   const StateType &getState() const override { return *this; }
3517   /// }
3518 
3519   /// Helper function for collecting accessed bytes in must-be-executed-context
3520   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3521                               DerefState &State) {
3522     const Value *UseV = U->get();
3523     if (!UseV->getType()->isPointerTy())
3524       return;
3525 
3526     Type *PtrTy = UseV->getType();
3527     const DataLayout &DL = A.getDataLayout();
3528     int64_t Offset;
3529     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3530             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3531       if (Base == &getAssociatedValue() &&
3532           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3533         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3534         State.addAccessedBytes(Offset, Size);
3535       }
3536     }
3537   }
3538 
3539   /// See followUsesInMBEC
3540   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3541                        AADereferenceable::StateType &State) {
3542     bool IsNonNull = false;
3543     bool TrackUse = false;
3544     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3545         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3546     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3547                       << " for instruction " << *I << "\n");
3548 
3549     addAccessedBytesForUse(A, U, I, State);
3550     State.takeKnownDerefBytesMaximum(DerefBytes);
3551     return TrackUse;
3552   }
3553 
3554   /// See AbstractAttribute::manifest(...).
3555   ChangeStatus manifest(Attributor &A) override {
3556     ChangeStatus Change = AADereferenceable::manifest(A);
3557     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3558       removeAttrs({Attribute::DereferenceableOrNull});
3559       return ChangeStatus::CHANGED;
3560     }
3561     return Change;
3562   }
3563 
3564   void getDeducedAttributes(LLVMContext &Ctx,
3565                             SmallVectorImpl<Attribute> &Attrs) const override {
3566     // TODO: Add *_globally support
3567     if (isAssumedNonNull())
3568       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3569           Ctx, getAssumedDereferenceableBytes()));
3570     else
3571       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3572           Ctx, getAssumedDereferenceableBytes()));
3573   }
3574 
3575   /// See AbstractAttribute::getAsStr().
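  /// Produces, e.g., "dereferenceable_or_null<4-8>" for known 4 and assumed 8
  /// dereferenceable bytes (illustrative values).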
3576   const std::string getAsStr() const override {
3577     if (!getAssumedDereferenceableBytes())
3578       return "unknown-dereferenceable";
3579     return std::string("dereferenceable") +
3580            (isAssumedNonNull() ? "" : "_or_null") +
3581            (isAssumedGlobal() ? "_globally" : "") + "<" +
3582            std::to_string(getKnownDereferenceableBytes()) + "-" +
3583            std::to_string(getAssumedDereferenceableBytes()) + ">";
3584   }
3585 };
3586 
3587 /// Dereferenceable attribute for a floating value.
3588 struct AADereferenceableFloating : AADereferenceableImpl {
3589   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3590       : AADereferenceableImpl(IRP, A) {}
3591 
3592   /// See AbstractAttribute::updateImpl(...).
3593   ChangeStatus updateImpl(Attributor &A) override {
3594     const DataLayout &DL = A.getDataLayout();
3595 
3596     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3597                             bool Stripped) -> bool {
3598       unsigned IdxWidth =
3599           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3600       APInt Offset(IdxWidth, 0);
3601       const Value *Base =
3602           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3603 
3604       const auto &AA = A.getAAFor<AADereferenceable>(
3605           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3606       int64_t DerefBytes = 0;
3607       if (!Stripped && this == &AA) {
3608         // Use IR information if we did not strip anything.
3609         // TODO: track globally.
3610         bool CanBeNull, CanBeFreed;
3611         DerefBytes =
3612           Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3613         T.GlobalState.indicatePessimisticFixpoint();
3614       } else {
3615         const DerefState &DS = AA.getState();
3616         DerefBytes = DS.DerefBytesState.getAssumed();
3617         T.GlobalState &= DS.GlobalState;
3618       }
3619 
3620       // For now we do not try to "increase" dereferenceability due to negative
3621       // indices as we first have to come up with code to deal with loops and
3622       // with overflows of the dereferenceable bytes.
3623       int64_t OffsetSExt = Offset.getSExtValue();
3624       if (OffsetSExt < 0)
3625         OffsetSExt = 0;
3626 
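      // E.g., a base known dereferenceable(8) accessed at a positive offset of
      // 4 yields at most 4 assumed dereferenceable bytes here; negative
      // offsets were clamped to 0 above.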
3627       T.takeAssumedDerefBytesMinimum(
3628           std::max(int64_t(0), DerefBytes - OffsetSExt));
3629 
3630       if (this == &AA) {
3631         if (!Stripped) {
3632           // If nothing was stripped IR information is all we got.
3633           T.takeKnownDerefBytesMaximum(
3634               std::max(int64_t(0), DerefBytes - OffsetSExt));
3635           T.indicatePessimisticFixpoint();
3636         } else if (OffsetSExt > 0) {
3637           // If something was stripped but there is circular reasoning, we look
3638           // at the offset. If it is positive, we basically decrease the
3639           // dereferenceable bytes in a circular loop now, which will simply
3640           // drive them down to the known value in a very slow way which we
3641           // can accelerate.
3642           T.indicatePessimisticFixpoint();
3643         }
3644       }
3645 
3646       return T.isValidState();
3647     };
3648 
3649     DerefState T;
3650     if (!genericValueTraversal<AADereferenceable, DerefState>(
3651             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3652       return indicatePessimisticFixpoint();
3653 
3654     return clampStateAndIndicateChange(getState(), T);
3655   }
3656 
3657   /// See AbstractAttribute::trackStatistics()
3658   void trackStatistics() const override {
3659     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3660   }
3661 };
3662 
3663 /// Dereferenceable attribute for a return value.
3664 struct AADereferenceableReturned final
3665     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3666   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3667       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3668             IRP, A) {}
3669 
3670   /// See AbstractAttribute::trackStatistics()
3671   void trackStatistics() const override {
3672     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3673   }
3674 };
3675 
3676 /// Dereferenceable attribute for an argument
3677 struct AADereferenceableArgument final
3678     : AAArgumentFromCallSiteArguments<AADereferenceable,
3679                                       AADereferenceableImpl> {
3680   using Base =
3681       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3682   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3683       : Base(IRP, A) {}
3684 
3685   /// See AbstractAttribute::trackStatistics()
3686   void trackStatistics() const override {
3687     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3688   }
3689 };
3690 
3691 /// Dereferenceable attribute for a call site argument.
3692 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3693   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3694       : AADereferenceableFloating(IRP, A) {}
3695 
3696   /// See AbstractAttribute::trackStatistics()
3697   void trackStatistics() const override {
3698     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3699   }
3700 };
3701 
3702 /// Dereferenceable attribute deduction for a call site return value.
3703 struct AADereferenceableCallSiteReturned final
3704     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3705   using Base =
3706       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3707   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3708       : Base(IRP, A) {}
3709 
3710   /// See AbstractAttribute::trackStatistics()
3711   void trackStatistics() const override {
3712     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3713   }
3714 };
3715 
3716 // ------------------------ Align Argument Attribute ------------------------
3717 
3718 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3719                                     Value &AssociatedValue, const Use *U,
3720                                     const Instruction *I, bool &TrackUse) {
3721   // We need to follow common pointer manipulation uses to the accesses they
3722   // feed into.
3723   if (isa<CastInst>(I)) {
3724     // Follow all but ptr2int casts.
3725     TrackUse = !isa<PtrToIntInst>(I);
3726     return 0;
3727   }
3728   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3729     if (GEP->hasAllConstantIndices())
3730       TrackUse = true;
3731     return 0;
3732   }
3733 
3734   MaybeAlign MA;
3735   if (const auto *CB = dyn_cast<CallBase>(I)) {
3736     if (CB->isBundleOperand(U) || CB->isCallee(U))
3737       return 0;
3738 
3739     unsigned ArgNo = CB->getArgOperandNo(U);
3740     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3741     // As long as we only use known information there is no need to track
3742     // dependences here.
3743     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3744     MA = MaybeAlign(AlignAA.getKnownAlign());
3745   }
3746 
3747   const DataLayout &DL = A.getDataLayout();
3748   const Value *UseV = U->get();
3749   if (auto *SI = dyn_cast<StoreInst>(I)) {
3750     if (SI->getPointerOperand() == UseV)
3751       MA = SI->getAlign();
3752   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3753     if (LI->getPointerOperand() == UseV)
3754       MA = LI->getAlign();
3755   }
3756 
3757   if (!MA || *MA <= QueryingAA.getKnownAlign())
3758     return 0;
3759 
3760   unsigned Alignment = MA->value();
3761   int64_t Offset;
3762 
3763   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3764     if (Base == &AssociatedValue) {
3765       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3766       // So we can say that the maximum power of two which is a divisor of
3767       // gcd(Offset, Alignment) is an alignment.
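           // E.g., for Offset = 12 and a use alignment of 8, gcd(12, 8) = 4,
           // so 4 is the best alignment we can claim for the base pointer.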
3768 
3769       uint32_t gcd =
3770           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3771       Alignment = llvm::PowerOf2Floor(gcd);
3772     }
3773   }
3774 
3775   return Alignment;
3776 }
3777 
3778 struct AAAlignImpl : AAAlign {
3779   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3780 
3781   /// See AbstractAttribute::initialize(...).
3782   void initialize(Attributor &A) override {
3783     SmallVector<Attribute, 4> Attrs;
3784     getAttrs({Attribute::Alignment}, Attrs);
3785     for (const Attribute &Attr : Attrs)
3786       takeKnownMaximum(Attr.getValueAsInt());
3787 
3788     Value &V = getAssociatedValue();
3789     // TODO: This is a HACK to keep getPointerAlignment from introducing a
3790     //       ptr2int use of the function pointer. This was caused by D73131. We
3791     //       want to avoid this for function pointers especially because we
3792     //       iterate their uses and int2ptr is not handled. It is not a
3793     //       correctness problem though!
3794     if (!V.getType()->getPointerElementType()->isFunctionTy())
3795       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3796 
3797     if (getIRPosition().isFnInterfaceKind() &&
3798         (!getAnchorScope() ||
3799          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3800       indicatePessimisticFixpoint();
3801       return;
3802     }
3803 
3804     if (Instruction *CtxI = getCtxI())
3805       followUsesInMBEC(*this, A, getState(), *CtxI);
3806   }
3807 
3808   /// See AbstractAttribute::manifest(...).
3809   ChangeStatus manifest(Attributor &A) override {
3810     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3811 
3812     // Check for users that allow alignment annotations.
3813     Value &AssociatedValue = getAssociatedValue();
3814     for (const Use &U : AssociatedValue.uses()) {
3815       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3816         if (SI->getPointerOperand() == &AssociatedValue)
3817           if (SI->getAlignment() < getAssumedAlign()) {
3818             STATS_DECLTRACK(AAAlign, Store,
3819                             "Number of times alignment added to a store");
3820             SI->setAlignment(Align(getAssumedAlign()));
3821             LoadStoreChanged = ChangeStatus::CHANGED;
3822           }
3823       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3824         if (LI->getPointerOperand() == &AssociatedValue)
3825           if (LI->getAlignment() < getAssumedAlign()) {
3826             LI->setAlignment(Align(getAssumedAlign()));
3827             STATS_DECLTRACK(AAAlign, Load,
3828                             "Number of times alignment added to a load");
3829             LoadStoreChanged = ChangeStatus::CHANGED;
3830           }
3831       }
3832     }
3833 
3834     ChangeStatus Changed = AAAlign::manifest(A);
3835 
3836     Align InheritAlign =
3837         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3838     if (InheritAlign >= getAssumedAlign())
3839       return LoadStoreChanged;
3840     return Changed | LoadStoreChanged;
3841   }
3842 
3843   // TODO: Provide a helper to determine the implied ABI alignment and check
3844   //       that value in the existing manifest method and a new one for
3845   //       AAAlignImpl to avoid making the alignment explicit if it did not improve.
3846 
3847   /// See AbstractAttribute::getDeducedAttributes
3848   virtual void
3849   getDeducedAttributes(LLVMContext &Ctx,
3850                        SmallVectorImpl<Attribute> &Attrs) const override {
3851     if (getAssumedAlign() > 1)
3852       Attrs.emplace_back(
3853           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3854   }
3855 
3856   /// See followUsesInMBEC
3857   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3858                        AAAlign::StateType &State) {
3859     bool TrackUse = false;
3860 
3861     unsigned int KnownAlign =
3862         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3863     State.takeKnownMaximum(KnownAlign);
3864 
3865     return TrackUse;
3866   }
3867 
3868   /// See AbstractAttribute::getAsStr().
3869   const std::string getAsStr() const override {
3870     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3871                                 "-" + std::to_string(getAssumedAlign()) + ">")
3872                              : "unknown-align";
3873   }
3874 };
3875 
3876 /// Align attribute for a floating value.
3877 struct AAAlignFloating : AAAlignImpl {
3878   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3879 
3880   /// See AbstractAttribute::updateImpl(...).
3881   ChangeStatus updateImpl(Attributor &A) override {
3882     const DataLayout &DL = A.getDataLayout();
3883 
3884     auto VisitValueCB = [&](Value &V, const Instruction *,
3885                             AAAlign::StateType &T, bool Stripped) -> bool {
3886       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3887                                            DepClassTy::REQUIRED);
3888       if (!Stripped && this == &AA) {
3889         int64_t Offset;
3890         unsigned Alignment = 1;
3891         if (const Value *Base =
3892                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3893           Align PA = Base->getPointerAlignment(DL);
3894           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3895           // So we can say that the maximum power of two which is a divisor of
3896           // gcd(Offset, Alignment) is an alignment.
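               // E.g., a base known to be 16-byte aligned accessed at offset 4
               // yields gcd(4, 16) = 4, so alignment 4 can be assumed for V.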
3897 
3898           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3899                                                uint32_t(PA.value()));
3900           Alignment = llvm::PowerOf2Floor(gcd);
3901         } else {
3902           Alignment = V.getPointerAlignment(DL).value();
3903         }
3904         // Use only IR information if we did not strip anything.
3905         T.takeKnownMaximum(Alignment);
3906         T.indicatePessimisticFixpoint();
3907       } else {
3908         // Use abstract attribute information.
3909         const AAAlign::StateType &DS = AA.getState();
3910         T ^= DS;
3911       }
3912       return T.isValidState();
3913     };
3914 
3915     StateType T;
3916     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3917                                                    VisitValueCB, getCtxI()))
3918       return indicatePessimisticFixpoint();
3919 
3920     // TODO: If we know we visited all incoming values, thus none are assumed
3921     // dead, we can take the known information from the state T.
3922     return clampStateAndIndicateChange(getState(), T);
3923   }
3924 
3925   /// See AbstractAttribute::trackStatistics()
3926   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3927 };
3928 
3929 /// Align attribute for function return value.
3930 struct AAAlignReturned final
3931     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3932   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3933   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3934 
3935   /// See AbstractAttribute::initialize(...).
3936   void initialize(Attributor &A) override {
3937     Base::initialize(A);
3938     Function *F = getAssociatedFunction();
3939     if (!F || F->isDeclaration())
3940       indicatePessimisticFixpoint();
3941   }
3942 
3943   /// See AbstractAttribute::trackStatistics()
3944   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3945 };
3946 
3947 /// Align attribute for function argument.
3948 struct AAAlignArgument final
3949     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3950   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3951   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3952 
3953   /// See AbstractAttribute::manifest(...).
3954   ChangeStatus manifest(Attributor &A) override {
3955     // If the associated argument is involved in a must-tail call we give up
3956     // because we would need to keep the argument alignments of caller and
3957     // callee in-sync. Just does not seem worth the trouble right now.
3958     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3959       return ChangeStatus::UNCHANGED;
3960     return Base::manifest(A);
3961   }
3962 
3963   /// See AbstractAttribute::trackStatistics()
3964   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3965 };
3966 
3967 struct AAAlignCallSiteArgument final : AAAlignFloating {
3968   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3969       : AAAlignFloating(IRP, A) {}
3970 
3971   /// See AbstractAttribute::manifest(...).
3972   ChangeStatus manifest(Attributor &A) override {
3973     // If the associated argument is involved in a must-tail call we give up
3974     // because we would need to keep the argument alignments of caller and
3975     // callee in-sync. Just does not seem worth the trouble right now.
3976     if (Argument *Arg = getAssociatedArgument())
3977       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3978         return ChangeStatus::UNCHANGED;
3979     ChangeStatus Changed = AAAlignImpl::manifest(A);
3980     Align InheritAlign =
3981         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3982     if (InheritAlign >= getAssumedAlign())
3983       Changed = ChangeStatus::UNCHANGED;
3984     return Changed;
3985   }
3986 
3987   /// See AbstractAttribute::updateImpl(Attributor &A).
3988   ChangeStatus updateImpl(Attributor &A) override {
3989     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3990     if (Argument *Arg = getAssociatedArgument()) {
3991       // We only take known information from the argument
3992       // so we do not need to track a dependence.
3993       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3994           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3995       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3996     }
3997     return Changed;
3998   }
3999 
4000   /// See AbstractAttribute::trackStatistics()
4001   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4002 };
4003 
4004 /// Align attribute deduction for a call site return value.
4005 struct AAAlignCallSiteReturned final
4006     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4007   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4008   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4009       : Base(IRP, A) {}
4010 
4011   /// See AbstractAttribute::initialize(...).
4012   void initialize(Attributor &A) override {
4013     Base::initialize(A);
4014     Function *F = getAssociatedFunction();
4015     if (!F || F->isDeclaration())
4016       indicatePessimisticFixpoint();
4017   }
4018 
4019   /// See AbstractAttribute::trackStatistics()
4020   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4021 };
4022 
4023 /// ------------------ Function No-Return Attribute ----------------------------
4024 struct AANoReturnImpl : public AANoReturn {
4025   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4026 
4027   /// See AbstractAttribute::initialize(...).
4028   void initialize(Attributor &A) override {
4029     AANoReturn::initialize(A);
4030     Function *F = getAssociatedFunction();
4031     if (!F || F->isDeclaration())
4032       indicatePessimisticFixpoint();
4033   }
4034 
4035   /// See AbstractAttribute::getAsStr().
4036   const std::string getAsStr() const override {
4037     return getAssumed() ? "noreturn" : "may-return";
4038   }
4039 
4040   /// See AbstractAttribute::updateImpl(Attributor &A).
4041   virtual ChangeStatus updateImpl(Attributor &A) override {
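         // The callback rejects every instruction, so the check below succeeds
         // only if the function contains no (live) return instruction at all.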
4042     auto CheckForNoReturn = [](Instruction &) { return false; };
4043     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4044                                    {(unsigned)Instruction::Ret}))
4045       return indicatePessimisticFixpoint();
4046     return ChangeStatus::UNCHANGED;
4047   }
4048 };
4049 
4050 struct AANoReturnFunction final : AANoReturnImpl {
4051   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4052       : AANoReturnImpl(IRP, A) {}
4053 
4054   /// See AbstractAttribute::trackStatistics()
4055   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4056 };
4057 
4058 /// NoReturn attribute deduction for a call site.
4059 struct AANoReturnCallSite final : AANoReturnImpl {
4060   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4061       : AANoReturnImpl(IRP, A) {}
4062 
4063   /// See AbstractAttribute::initialize(...).
4064   void initialize(Attributor &A) override {
4065     AANoReturnImpl::initialize(A);
4066     if (Function *F = getAssociatedFunction()) {
4067       const IRPosition &FnPos = IRPosition::function(*F);
4068       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4069       if (!FnAA.isAssumedNoReturn())
4070         indicatePessimisticFixpoint();
4071     }
4072   }
4073 
4074   /// See AbstractAttribute::updateImpl(...).
4075   ChangeStatus updateImpl(Attributor &A) override {
4076     // TODO: Once we have call site specific value information we can provide
4077     //       call site specific liveness information and then it makes
4078     //       sense to specialize attributes for call site arguments instead of
4079     //       redirecting requests to the callee argument.
4080     Function *F = getAssociatedFunction();
4081     const IRPosition &FnPos = IRPosition::function(*F);
4082     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4083     return clampStateAndIndicateChange(getState(), FnAA.getState());
4084   }
4085 
4086   /// See AbstractAttribute::trackStatistics()
4087   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4088 };
4089 
4090 /// ----------------------- Variable Capturing ---------------------------------
4091 
4092 /// A class to hold the state for no-capture attributes.
4093 struct AANoCaptureImpl : public AANoCapture {
4094   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4095 
4096   /// See AbstractAttribute::initialize(...).
4097   void initialize(Attributor &A) override {
4098     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4099       indicateOptimisticFixpoint();
4100       return;
4101     }
4102     Function *AnchorScope = getAnchorScope();
4103     if (isFnInterfaceKind() &&
4104         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4105       indicatePessimisticFixpoint();
4106       return;
4107     }
4108 
4109     // You cannot "capture" null in the default address space.
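         // (In non-default address spaces null may be a valid, observable
         //  address, so this shortcut is limited to address space 0.)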
4110     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4111         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4112       indicateOptimisticFixpoint();
4113       return;
4114     }
4115 
4116     const Function *F =
4117         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4118 
4119     // Check what state the associated function can actually capture.
4120     if (F)
4121       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4122     else
4123       indicatePessimisticFixpoint();
4124   }
4125 
4126   /// See AbstractAttribute::updateImpl(...).
4127   ChangeStatus updateImpl(Attributor &A) override;
4128 
4129   /// See AbstractAttribute::getDeducedAttributes(...).
4130   virtual void
4131   getDeducedAttributes(LLVMContext &Ctx,
4132                        SmallVectorImpl<Attribute> &Attrs) const override {
4133     if (!isAssumedNoCaptureMaybeReturned())
4134       return;
4135 
4136     if (isArgumentPosition()) {
4137       if (isAssumedNoCapture())
4138         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4139       else if (ManifestInternal)
4140         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4141     }
4142   }
4143 
4144   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4145   /// depending on the ability of the function associated with \p IRP to capture
4146   /// state in memory and through "returning/throwing", respectively.
4147   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4148                                                    const Function &F,
4149                                                    BitIntegerState &State) {
4150     // TODO: Once we have memory behavior attributes we should use them here.
4151 
4152     // If we know we cannot communicate or write to memory, we do not care about
4153     // ptr2int anymore.
4154     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4155         F.getReturnType()->isVoidTy()) {
4156       State.addKnownBits(NO_CAPTURE);
4157       return;
4158     }
4159 
4160     // A function cannot capture state in memory if it only reads memory; it can,
4161     // however, return/throw state and the state might be influenced by the
4162     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4163     if (F.onlyReadsMemory())
4164       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4165 
4166     // A function cannot communicate state back if it does not throw
4167     // exceptions and does not return values.
4168     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4169       State.addKnownBits(NOT_CAPTURED_IN_RET);
4170 
4171     // Check existing "returned" attributes.
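         // If another argument carries `returned` in a nothrow function, the
         // tracked value cannot escape through the return value; if the tracked
         // argument itself carries it, it may be communicated back that way.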
4172     int ArgNo = IRP.getCalleeArgNo();
4173     if (F.doesNotThrow() && ArgNo >= 0) {
4174       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4175         if (F.hasParamAttribute(u, Attribute::Returned)) {
4176           if (u == unsigned(ArgNo))
4177             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4178           else if (F.onlyReadsMemory())
4179             State.addKnownBits(NO_CAPTURE);
4180           else
4181             State.addKnownBits(NOT_CAPTURED_IN_RET);
4182           break;
4183         }
4184     }
4185   }
4186 
4187   /// See AbstractState::getAsStr().
4188   const std::string getAsStr() const override {
4189     if (isKnownNoCapture())
4190       return "known not-captured";
4191     if (isAssumedNoCapture())
4192       return "assumed not-captured";
4193     if (isKnownNoCaptureMaybeReturned())
4194       return "known not-captured-maybe-returned";
4195     if (isAssumedNoCaptureMaybeReturned())
4196       return "assumed not-captured-maybe-returned";
4197     return "assumed-captured";
4198   }
4199 };
4200 
4201 /// Attributor-aware capture tracker.
4202 struct AACaptureUseTracker final : public CaptureTracker {
4203 
4204   /// Create a capture tracker that can lookup in-flight abstract attributes
4205   /// through the Attributor \p A.
4206   ///
4207   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4208   /// search is stopped. If a use leads to a return instruction,
4209   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4210   /// If a use leads to a ptr2int which may capture the value,
4211   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4212   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4213   /// set. All values in \p PotentialCopies are later tracked as well. For every
4214   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4215   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4216   /// conservatively set to true.
4217   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4218                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4219                       SmallVectorImpl<const Value *> &PotentialCopies,
4220                       unsigned &RemainingUsesToExplore)
4221       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4222         PotentialCopies(PotentialCopies),
4223         RemainingUsesToExplore(RemainingUsesToExplore) {}
4224 
4225   /// Determine if \p V may be captured. *Also updates the state!*
4226   bool valueMayBeCaptured(const Value *V) {
4227     if (V->getType()->isPointerTy()) {
4228       PointerMayBeCaptured(V, this);
4229     } else {
4230       State.indicatePessimisticFixpoint();
4231     }
4232     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4233   }
4234 
4235   /// See CaptureTracker::tooManyUses().
4236   void tooManyUses() override {
4237     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4238   }
4239 
4240   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4241     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4242       return true;
4243     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4244         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4245     return DerefAA.getAssumedDereferenceableBytes();
4246   }
4247 
4248   /// See CaptureTracker::captured(...).
4249   bool captured(const Use *U) override {
4250     Instruction *UInst = cast<Instruction>(U->getUser());
4251     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4252                       << "\n");
4253 
4254     // Because we may reuse the tracker multiple times we keep track of the
4255     // number of explored uses ourselves as well.
4256     if (RemainingUsesToExplore-- == 0) {
4257       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4258       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4259                           /* Return */ true);
4260     }
4261 
4262     // Deal with ptr2int by following uses.
4263     if (isa<PtrToIntInst>(UInst)) {
4264       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4265       return valueMayBeCaptured(UInst);
4266     }
4267 
4268     // Explicitly catch return instructions.
4269     if (isa<ReturnInst>(UInst))
4270       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4271                           /* Return */ true);
4272 
4273     // For now we only use special logic for call sites. However, the tracker
4274     // itself knows about a lot of other non-capturing cases already.
4275     auto *CB = dyn_cast<CallBase>(UInst);
4276     if (!CB || !CB->isArgOperand(U))
4277       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4278                           /* Return */ true);
4279 
4280     unsigned ArgNo = CB->getArgOperandNo(U);
4281     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4282     // If we have an abstract no-capture attribute for the argument we can use
4283     // it to justify a non-capture attribute here. This allows recursion!
4284     auto &ArgNoCaptureAA =
4285         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4286     if (ArgNoCaptureAA.isAssumedNoCapture())
4287       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4288                           /* Return */ false);
4289     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4290       addPotentialCopy(*CB);
4291       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4292                           /* Return */ false);
4293     }
4294 
4295     // Lastly, we could not find a reason to assume no-capture, so we do not.
4296     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4297                         /* Return */ true);
4298   }
4299 
4300   /// Register \p CS as potential copy of the value we are checking.
4301   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4302 
4303   /// See CaptureTracker::shouldExplore(...).
4304   bool shouldExplore(const Use *U) override {
4305     // Check liveness and ignore droppable users.
4306     return !U->getUser()->isDroppable() &&
4307            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4308   }
4309 
4310   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4311   /// \p CapturedInRet, then return the appropriate value for use in the
4312   /// CaptureTracker::captured() interface.
4313   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4314                     bool CapturedInRet) {
4315     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4316                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4317     if (CapturedInMem)
4318       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4319     if (CapturedInInt)
4320       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4321     if (CapturedInRet)
4322       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4323     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4324   }
4325 
4326 private:
4327   /// The attributor providing in-flight abstract attributes.
4328   Attributor &A;
4329 
4330   /// The abstract attribute currently updated.
4331   AANoCapture &NoCaptureAA;
4332 
4333   /// The abstract liveness state.
4334   const AAIsDead &IsDeadAA;
4335 
4336   /// The state currently updated.
4337   AANoCapture::StateType &State;
4338 
4339   /// Set of potential copies of the tracked value.
4340   SmallVectorImpl<const Value *> &PotentialCopies;
4341 
4342   /// Global counter to limit the number of explored uses.
4343   unsigned &RemainingUsesToExplore;
4344 };
4345 
4346 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4347   const IRPosition &IRP = getIRPosition();
4348   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4349                                         : &IRP.getAssociatedValue();
4350   if (!V)
4351     return indicatePessimisticFixpoint();
4352 
4353   const Function *F =
4354       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4355   assert(F && "Expected a function!");
4356   const IRPosition &FnPos = IRPosition::function(*F);
4357   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4358 
4359   AANoCapture::StateType T;
4360 
4361   // Readonly means we cannot capture through memory.
4362   const auto &FnMemAA =
4363       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4364   if (FnMemAA.isAssumedReadOnly()) {
4365     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4366     if (FnMemAA.isKnownReadOnly())
4367       addKnownBits(NOT_CAPTURED_IN_MEM);
4368     else
4369       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4370   }
4371 
4372   // Make sure all returned values are different from the underlying value.
4373   // TODO: we could do this in a more sophisticated way inside
4374   //       AAReturnedValues, e.g., track all values that escape through returns
4375   //       directly somehow.
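       // At most one returned value may be a constant; every other returned
       // value must be an argument other than the one we reason about.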
4376   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4377     bool SeenConstant = false;
4378     for (auto &It : RVAA.returned_values()) {
4379       if (isa<Constant>(It.first)) {
4380         if (SeenConstant)
4381           return false;
4382         SeenConstant = true;
4383       } else if (!isa<Argument>(It.first) ||
4384                  It.first == getAssociatedArgument())
4385         return false;
4386     }
4387     return true;
4388   };
4389 
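       // For nounwind functions the return value is the only remaining way to
       // communicate the pointer back, which the CheckReturnedArgs predicate
       // defined above rules out.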
4390   const auto &NoUnwindAA =
4391       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4392   if (NoUnwindAA.isAssumedNoUnwind()) {
4393     bool IsVoidTy = F->getReturnType()->isVoidTy();
4394     const AAReturnedValues *RVAA =
4395         IsVoidTy ? nullptr
4396                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4398                                                  DepClassTy::OPTIONAL);
4399     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4400       T.addKnownBits(NOT_CAPTURED_IN_RET);
4401       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4402         return ChangeStatus::UNCHANGED;
4403       if (NoUnwindAA.isKnownNoUnwind() &&
4404           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4405         addKnownBits(NOT_CAPTURED_IN_RET);
4406         if (isKnown(NOT_CAPTURED_IN_MEM))
4407           return indicateOptimisticFixpoint();
4408       }
4409     }
4410   }
4411 
4412   // Use the CaptureTracker interface and logic with the specialized tracker,
4413   // defined in AACaptureUseTracker, that can look at in-flight abstract
4414   // attributes and directly updates the assumed state.
4415   SmallVector<const Value *, 4> PotentialCopies;
4416   unsigned RemainingUsesToExplore =
4417       getDefaultMaxUsesToExploreForCaptureTracking();
4418   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4419                               RemainingUsesToExplore);
4420 
4421   // Check all potential copies of the associated value until we can assume
4422   // none will be captured or we have to assume at least one might be.
4423   unsigned Idx = 0;
4424   PotentialCopies.push_back(V);
4425   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4426     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4427 
4428   AANoCapture::StateType &S = getState();
4429   auto Assumed = S.getAssumed();
4430   S.intersectAssumedBits(T.getAssumed());
4431   if (!isAssumedNoCaptureMaybeReturned())
4432     return indicatePessimisticFixpoint();
4433   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4434                                    : ChangeStatus::CHANGED;
4435 }
4436 
4437 /// NoCapture attribute for function arguments.
4438 struct AANoCaptureArgument final : AANoCaptureImpl {
4439   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4440       : AANoCaptureImpl(IRP, A) {}
4441 
4442   /// See AbstractAttribute::trackStatistics()
4443   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4444 };
4445 
4446 /// NoCapture attribute for call site arguments.
4447 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4448   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4449       : AANoCaptureImpl(IRP, A) {}
4450 
4451   /// See AbstractAttribute::initialize(...).
4452   void initialize(Attributor &A) override {
4453     if (Argument *Arg = getAssociatedArgument())
4454       if (Arg->hasByValAttr())
4455         indicateOptimisticFixpoint();
4456     AANoCaptureImpl::initialize(A);
4457   }
4458 
4459   /// See AbstractAttribute::updateImpl(...).
4460   ChangeStatus updateImpl(Attributor &A) override {
4461     // TODO: Once we have call site specific value information we can provide
4462     //       call site specific liveness information and then it makes
4463     //       sense to specialize attributes for call site arguments instead of
4464     //       redirecting requests to the callee argument.
4465     Argument *Arg = getAssociatedArgument();
4466     if (!Arg)
4467       return indicatePessimisticFixpoint();
4468     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4469     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4470     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4471   }
4472 
4473   /// See AbstractAttribute::trackStatistics()
4474   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
4475 };
4476 
4477 /// NoCapture attribute for floating values.
4478 struct AANoCaptureFloating final : AANoCaptureImpl {
4479   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4480       : AANoCaptureImpl(IRP, A) {}
4481 
4482   /// See AbstractAttribute::trackStatistics()
4483   void trackStatistics() const override {
4484     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4485   }
4486 };
4487 
4488 /// NoCapture attribute for function return value.
4489 struct AANoCaptureReturned final : AANoCaptureImpl {
4490   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4491       : AANoCaptureImpl(IRP, A) {
4492     llvm_unreachable("NoCapture is not applicable to function returns!");
4493   }
4494 
4495   /// See AbstractAttribute::initialize(...).
4496   void initialize(Attributor &A) override {
4497     llvm_unreachable("NoCapture is not applicable to function returns!");
4498   }
4499 
4500   /// See AbstractAttribute::updateImpl(...).
4501   ChangeStatus updateImpl(Attributor &A) override {
4502     llvm_unreachable("NoCapture is not applicable to function returns!");
4503   }
4504 
4505   /// See AbstractAttribute::trackStatistics()
4506   void trackStatistics() const override {}
4507 };
4508 
4509 /// NoCapture attribute deduction for a call site return value.
4510 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4511   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4512       : AANoCaptureImpl(IRP, A) {}
4513 
4514   /// See AbstractAttribute::initialize(...).
4515   void initialize(Attributor &A) override {
4516     const Function *F = getAnchorScope();
4517     // Check what state the associated function can actually capture.
4518     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4519   }
4520 
4521   /// See AbstractAttribute::trackStatistics()
4522   void trackStatistics() const override {
4523     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4524   }
4525 };
4526 
4527 /// ------------------ Value Simplify Attribute ----------------------------
4528 struct AAValueSimplifyImpl : AAValueSimplify {
4529   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4530       : AAValueSimplify(IRP, A) {}
4531 
4532   /// See AbstractAttribute::initialize(...).
4533   void initialize(Attributor &A) override {
4534     if (getAssociatedValue().getType()->isVoidTy())
4535       indicatePessimisticFixpoint();
4536   }
4537 
4538   /// See AbstractAttribute::getAsStr().
4539   const std::string getAsStr() const override {
4540     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4541                         : "not-simple";
4542   }
4543 
4544   /// See AbstractAttribute::trackStatistics()
4545   void trackStatistics() const override {}
4546 
4547   /// See AAValueSimplify::getAssumedSimplifiedValue()
4548   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4549     if (!getAssumed())
4550       return const_cast<Value *>(&getAssociatedValue());
4551     return SimplifiedAssociatedValue;
4552   }
4553 
4554   /// Helper function for querying AAValueSimplify and updating the candidate.
4555   /// \param QueryingValue Value trying to unify with SimplifiedValue
4556   /// \param AccumulatedSimplifiedValue Current simplification result.
4557   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4558                              Value &QueryingValue,
4559                              Optional<Value *> &AccumulatedSimplifiedValue) {
4560     // FIXME: Add a typecast support.
4561 
4562     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4563         QueryingAA,
4564         IRPosition::value(QueryingValue, QueryingAA.getCallBaseContext()),
4565         DepClassTy::REQUIRED);
4566 
4567     Optional<Value *> QueryingValueSimplified =
4568         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4569 
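         // llvm::None means the queried value has no simplified value yet;
         // that is compatible with any current candidate. A null value
         // signals a failed query.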
4570     if (!QueryingValueSimplified.hasValue())
4571       return true;
4572 
4573     if (!QueryingValueSimplified.getValue())
4574       return false;
4575 
4576     Value &QueryingValueSimplifiedUnwrapped =
4577         *QueryingValueSimplified.getValue();
4578 
4579     if (AccumulatedSimplifiedValue.hasValue() &&
4580         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4581         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4582       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4583     if (AccumulatedSimplifiedValue.hasValue() &&
4584         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4585       return true;
4586 
4587     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4588                       << " is assumed to be "
4589                       << QueryingValueSimplifiedUnwrapped << "\n");
4590 
4591     AccumulatedSimplifiedValue = QueryingValueSimplified;
4592     return true;
4593   }
4594 
4595   /// Return true if a simplification candidate was found, false otherwise.
4596   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4597     if (!getAssociatedValue().getType()->isIntegerTy())
4598       return false;
4599 
4600     // This will also pass the call base context.
4601     const auto &AA =
4602         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4603 
4604     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4605 
4606     if (!COpt.hasValue()) {
4607       SimplifiedAssociatedValue = llvm::None;
4608       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4609       return true;
4610     }
4611     if (auto *C = COpt.getValue()) {
4612       SimplifiedAssociatedValue = C;
4613       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4614       return true;
4615     }
4616     return false;
4617   }
4618 
4619   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4620     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4621       return true;
4622     if (askSimplifiedValueFor<AAPotentialValues>(A))
4623       return true;
4624     return false;
4625   }
4626 
4627   /// See AbstractAttribute::manifest(...).
4628   ChangeStatus manifest(Attributor &A) override {
4629     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4630 
4631     if (SimplifiedAssociatedValue.hasValue() &&
4632         !SimplifiedAssociatedValue.getValue())
4633       return Changed;
4634 
4635     Value &V = getAssociatedValue();
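         // If no candidate was ever recorded (llvm::None), the value is still
         // unconstrained and undef is a valid replacement.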
4636     auto *C = SimplifiedAssociatedValue.hasValue()
4637                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4638                   : UndefValue::get(V.getType());
4639     if (C) {
4640       // We can replace the AssociatedValue with the constant.
4641       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4642         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4643                           << " :: " << *this << "\n");
4644         if (A.changeValueAfterManifest(V, *C))
4645           Changed = ChangeStatus::CHANGED;
4646       }
4647     }
4648 
4649     return Changed | AAValueSimplify::manifest(A);
4650   }
4651 
4652   /// See AbstractState::indicatePessimisticFixpoint(...).
4653   ChangeStatus indicatePessimisticFixpoint() override {
4654     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4655     // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4656     SimplifiedAssociatedValue = &getAssociatedValue();
4657     indicateOptimisticFixpoint();
4658     return ChangeStatus::CHANGED;
4659   }
4660 
4661 protected:
4662   // An assumed simplified value. Initially, it is set to Optional::None, which
4663   // means that the value is not clear under the current assumption. If in the
4664   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4665   // the original associated value instead.
4666   Optional<Value *> SimplifiedAssociatedValue;
4667 };
4668 
4669 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4670   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4671       : AAValueSimplifyImpl(IRP, A) {}
4672 
4673   void initialize(Attributor &A) override {
4674     AAValueSimplifyImpl::initialize(A);
4675     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4676       indicatePessimisticFixpoint();
4677     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4678                  Attribute::StructRet, Attribute::Nest},
4679                 /* IgnoreSubsumingPositions */ true))
4680       indicatePessimisticFixpoint();
4681 
4682     // FIXME: This is a hack to prevent us from propagating function pointers in
4683     // the new pass manager CGSCC pass as it creates call edges the
4684     // CallGraphUpdater cannot handle yet.
4685     Value &V = getAssociatedValue();
4686     if (V.getType()->isPointerTy() &&
4687         V.getType()->getPointerElementType()->isFunctionTy() &&
4688         !A.isModulePass())
4689       indicatePessimisticFixpoint();
4690   }
4691 
4692   /// See AbstractAttribute::updateImpl(...).
4693   ChangeStatus updateImpl(Attributor &A) override {
4694     // Byval is only replaceable if it is readonly; otherwise we would write
4695     // into the replaced value and not the copy that byval creates implicitly.
4696     Argument *Arg = getAssociatedArgument();
4697     if (Arg->hasByValAttr()) {
4698       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4699       //       there is no race by not copying a constant byval.
4700       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4701                                                        DepClassTy::REQUIRED);
4702       if (!MemAA.isAssumedReadOnly())
4703         return indicatePessimisticFixpoint();
4704     }
4705 
4706     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4707 
4708     auto PredForCallSite = [&](AbstractCallSite ACS) {
4709       const IRPosition &ACSArgPos =
4710           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4711       // Check if a corresponding argument was found or if it is not associated
4712       // (which can happen for callback calls).
4713       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4714         return false;
4715 
4716       // We can only propagate thread independent values through callbacks.
4717       // This is different to direct/indirect call sites because for them we
4718       // know the thread executing the caller and callee is the same. For
4719       // callbacks this is not guaranteed, thus a thread dependent value could
4720       // be different for the caller and callee, making it invalid to propagate.
4721       Value &ArgOp = ACSArgPos.getAssociatedValue();
4722       if (ACS.isCallbackCall())
4723         if (auto *C = dyn_cast<Constant>(&ArgOp))
4724           if (C->isThreadDependent())
4725             return false;
4726       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4727     };
4728 
4729     // Generate an answer specific to the call site context.
4730     bool Success;
4731     bool AllCallSitesKnown;
4732     if (hasCallBaseContext())
4733       Success = PredForCallSite(
4734           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4735     else
4736       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4737                                        AllCallSitesKnown);
4738 
4739     if (!Success)
4740       if (!askSimplifiedValueForOtherAAs(A))
4741         return indicatePessimisticFixpoint();
4742 
4743     // If a candidate was found in this update, return CHANGED.
4744     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4745                ? ChangeStatus::UNCHANGED
4746                : ChangeStatus::CHANGED;
4747   }
4748 
4749   /// See AbstractAttribute::trackStatistics()
4750   void trackStatistics() const override {
4751     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4752   }
4753 };
4754 
4755 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4756   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4757       : AAValueSimplifyImpl(IRP, A) {}
4758 
4759   /// See AbstractAttribute::updateImpl(...).
4760   ChangeStatus updateImpl(Attributor &A) override {
4761     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4762 
4763     auto PredForReturned = [&](Value &V) {
4764       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4765     };
4766 
4767     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4768       if (!askSimplifiedValueForOtherAAs(A))
4769         return indicatePessimisticFixpoint();
4770 
4771     // If a candidate was found in this update, return CHANGED.
4772     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4773                ? ChangeStatus::UNCHANGED
4774                : ChangeStatus::CHANGED;
4775   }
4776 
4777   ChangeStatus manifest(Attributor &A) override {
4778     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4779 
4780     if (SimplifiedAssociatedValue.hasValue() &&
4781         !SimplifiedAssociatedValue.getValue())
4782       return Changed;
4783 
4784     Value &V = getAssociatedValue();
4785     auto *C = SimplifiedAssociatedValue.hasValue()
4786                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4787                   : UndefValue::get(V.getType());
4788     if (C) {
4789       auto PredForReturned =
4790           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4791             // We can replace the AssociatedValue with the constant.
4792             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4793               return true;
4794 
4795             for (ReturnInst *RI : RetInsts) {
4796               if (RI->getFunction() != getAnchorScope())
4797                 continue;
4798               auto *RC = C;
4799               if (RC->getType() != RI->getReturnValue()->getType())
4800                 RC = ConstantExpr::getBitCast(RC,
4801                                               RI->getReturnValue()->getType());
4802               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4803                                 << " in " << *RI << " :: " << *this << "\n");
4804               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4805                 Changed = ChangeStatus::CHANGED;
4806             }
4807             return true;
4808           };
4809       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4810     }
4811 
4812     return Changed | AAValueSimplify::manifest(A);
4813   }
4814 
4815   /// See AbstractAttribute::trackStatistics()
4816   void trackStatistics() const override {
4817     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4818   }
4819 };
4820 
4821 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4822   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4823       : AAValueSimplifyImpl(IRP, A) {}
4824 
4825   /// See AbstractAttribute::initialize(...).
4826   void initialize(Attributor &A) override {
4827     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4828     //        Needs investigation.
4829     // AAValueSimplifyImpl::initialize(A);
4830     Value &V = getAnchorValue();
4831 
4832     // TODO: Handle other cases.
4833     if (isa<Constant>(V))
4834       indicatePessimisticFixpoint();
4835   }
4836 
4837   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4838   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4839   /// Return true if successful; in that case SimplifiedAssociatedValue will be
4840   /// updated and \p Changed is set appropriately.
4841   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4842                               ChangeStatus &Changed) {
4843     if (!ICmp)
4844       return false;
4845     if (!ICmp->isEquality())
4846       return false;
4847 
4848     // This is a comparison with == or !=. We check for nullptr now.
4849     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4850     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4851     if (!Op0IsNull && !Op1IsNull)
4852       return false;
4853 
4854     LLVMContext &Ctx = ICmp->getContext();
4855     // Check for `nullptr ==/!= nullptr` first:
4856     if (Op0IsNull && Op1IsNull) {
4857       Value *NewVal = ConstantInt::get(
4858           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4859       assert(!SimplifiedAssociatedValue.hasValue() &&
4860              "Did not expect non-fixed value for constant comparison");
4861       SimplifiedAssociatedValue = NewVal;
4862       indicateOptimisticFixpoint();
4863       Changed = ChangeStatus::CHANGED;
4864       return true;
4865     }
4866 
4867     // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
4868     // on the non-nullptr operand and if we assume it's non-null we can conclude
4869     // the result of the comparison.
4870     assert((Op0IsNull || Op1IsNull) &&
4871            "Expected nullptr versus non-nullptr comparison at this point");
4872 
4873     // The index is the operand that we assume is not null.
4874     unsigned PtrIdx = Op0IsNull;
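         // Op0IsNull converts to 1 when operand 0 is the nullptr constant,
         // thereby selecting operand 1 as the pointer to query, and to 0
         // otherwise.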
4875     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4876         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4877         DepClassTy::REQUIRED);
4878     if (!PtrNonNullAA.isAssumedNonNull())
4879       return false;
4880 
4881     // The new value depends on the predicate, true for != and false for ==.
4882     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4883                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4884 
4885     assert((!SimplifiedAssociatedValue.hasValue() ||
4886             SimplifiedAssociatedValue == NewVal) &&
4887            "Did not expect to change value for zero-comparison");
4888 
4889     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4890     SimplifiedAssociatedValue = NewVal;
4891 
4892     if (PtrNonNullAA.isKnownNonNull())
4893       indicateOptimisticFixpoint();
4894 
4895     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
4896     return true;
4897   }
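  // Illustrative example (not from the original source): given IR like
  //   %c = icmp eq i8* %p, null
  // where AANonNull can assume %p is non-null, %c simplifies to `i1 false`;
  // for `icmp ne` it simplifies to `i1 true`, matching the NewVal logic above.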
4898 
4899   /// See AbstractAttribute::updateImpl(...).
4900   ChangeStatus updateImpl(Attributor &A) override {
4901     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4902 
4903     ChangeStatus Changed;
4904     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4905                                Changed))
4906       return Changed;
4907 
4908     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4909                             bool Stripped) -> bool {
4910       auto &AA = A.getAAFor<AAValueSimplify>(
4911           *this, IRPosition::value(V, getCallBaseContext()),
4912           DepClassTy::REQUIRED);
4913       if (!Stripped && this == &AA) {
4914       // TODO: Look at the instruction and check recursively.
4915 
4916         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4917                           << "\n");
4918         return false;
4919       }
4920       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4921     };
4922 
4923     bool Dummy = false;
4924     if (!genericValueTraversal<AAValueSimplify, bool>(
4925             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4926             /* UseValueSimplify */ false))
4927       if (!askSimplifiedValueForOtherAAs(A))
4928         return indicatePessimisticFixpoint();
4929 
4930     // If a candidate was found in this update, return CHANGED.
4931 
4932     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4933                ? ChangeStatus::UNCHANGED
4934                : ChangeStatus::CHANGED;
4935   }
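  // Illustrative note: genericValueTraversal roughly strips casts and walks
  // through PHIs and selects, so e.g. a PHI whose incoming values all simplify
  // to the same constant lets this floating position assume that constant too.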
4936 
4937   /// See AbstractAttribute::trackStatistics()
4938   void trackStatistics() const override {
4939     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4940   }
4941 };
4942 
4943 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4944   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4945       : AAValueSimplifyImpl(IRP, A) {}
4946 
4947   /// See AbstractAttribute::initialize(...).
4948   void initialize(Attributor &A) override {
4949     SimplifiedAssociatedValue = &getAnchorValue();
4950     indicateOptimisticFixpoint();
4951   }
4952   /// See AbstractAttribute::updateImpl(...).
4953   ChangeStatus updateImpl(Attributor &A) override {
4954     llvm_unreachable(
4955         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4956   }
4957   /// See AbstractAttribute::trackStatistics()
4958   void trackStatistics() const override {
4959     STATS_DECLTRACK_FN_ATTR(value_simplify)
4960   }
4961 };
4962 
4963 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4964   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4965       : AAValueSimplifyFunction(IRP, A) {}
4966   /// See AbstractAttribute::trackStatistics()
4967   void trackStatistics() const override {
4968     STATS_DECLTRACK_CS_ATTR(value_simplify)
4969   }
4970 };
4971 
4972 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4973   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4974       : AAValueSimplifyReturned(IRP, A) {}
4975 
4976   /// See AbstractAttribute::manifest(...).
4977   ChangeStatus manifest(Attributor &A) override {
4978     return AAValueSimplifyImpl::manifest(A);
4979   }
4980 
4981   void trackStatistics() const override {
4982     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4983   }
4984 };
4985 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4986   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4987       : AAValueSimplifyFloating(IRP, A) {}
4988 
4989   /// See AbstractAttribute::manifest(...).
4990   ChangeStatus manifest(Attributor &A) override {
4991     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4992 
4993     if (SimplifiedAssociatedValue.hasValue() &&
4994         !SimplifiedAssociatedValue.getValue())
4995       return Changed;
4996 
4997     Value &V = getAssociatedValue();
4998     auto *C = SimplifiedAssociatedValue.hasValue()
4999                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
5000                   : UndefValue::get(V.getType());
5001     if (C) {
5002       Use &U = cast<CallBase>(&getAnchorValue())
5003                    ->getArgOperandUse(getCallSiteArgNo());
5004       // We can replace the AssociatedValue with the constant.
5005       if (&V != C && V.getType() == C->getType()) {
5006         if (A.changeUseAfterManifest(U, *C))
5007           Changed = ChangeStatus::CHANGED;
5008       }
5009     }
5010 
5011     return Changed | AAValueSimplify::manifest(A);
5012   }
5013 
5014   void trackStatistics() const override {
5015     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5016   }
5017 };
5018 
5019 /// ----------------------- Heap-To-Stack Conversion ---------------------------
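// Sketch of the transformation (illustrative IR, not from the source): a call
//   %p = call i8* @malloc(i64 16)
// whose pointer provably does not escape and whose unique free is always
// reached becomes
//   %p = alloca i8, i64 16
// while the matching `call void @free(i8* %p)` is deleted.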
5020 struct AAHeapToStackImpl : public AAHeapToStack {
5021   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5022       : AAHeapToStack(IRP, A) {}
5023 
5024   const std::string getAsStr() const override {
5025     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5026   }
5027 
5028   ChangeStatus manifest(Attributor &A) override {
5029     assert(getState().isValidState() &&
5030            "Attempted to manifest an invalid state!");
5031 
5032     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5033     Function *F = getAnchorScope();
5034     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5035 
5036     for (Instruction *MallocCall : MallocCalls) {
5037       // This malloc cannot be replaced.
5038       if (BadMallocCalls.count(MallocCall))
5039         continue;
5040 
5041       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5042         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5043         A.deleteAfterManifest(*FreeCall);
5044         HasChanged = ChangeStatus::CHANGED;
5045       }
5046 
5047       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5048                         << "\n");
5049 
5050       Align Alignment;
5051       Value *Size;
5052       if (isCallocLikeFn(MallocCall, TLI)) {
5053         auto *Num = MallocCall->getOperand(0);
5054         auto *SizeT = MallocCall->getOperand(1);
5055         IRBuilder<> B(MallocCall);
5056         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5057       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5058         Size = MallocCall->getOperand(1);
5059         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5060                                    ->getValue()
5061                                    .getZExtValue())
5062                         .valueOrOne();
5063       } else {
5064         Size = MallocCall->getOperand(0);
5065       }
5066 
5067       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5068       Instruction *AI =
5069           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5070                          "", MallocCall->getNextNode());
5071 
5072       if (AI->getType() != MallocCall->getType())
5073         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5074                              AI->getNextNode());
5075 
5076       A.changeValueAfterManifest(*MallocCall, *AI);
5077 
5078       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5079         auto *NBB = II->getNormalDest();
5080         BranchInst::Create(NBB, MallocCall->getParent());
5081         A.deleteAfterManifest(*MallocCall);
5082       } else {
5083         A.deleteAfterManifest(*MallocCall);
5084       }
5085 
5086       // Zero out the allocated memory if it was a calloc.
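      // Illustrative: calloc(num, size) returns zeroed memory, so the new
      // alloca is followed by an llvm.memset of `Size` (num * size) zero
      // bytes, emitted below.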
5087       if (isCallocLikeFn(MallocCall, TLI)) {
5088         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5089                                    AI->getNextNode());
5090         Value *Ops[] = {
5091             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5092             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5093 
5094         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5095         Module *M = F->getParent();
5096         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5097         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5098       }
5099       HasChanged = ChangeStatus::CHANGED;
5100     }
5101 
5102     return HasChanged;
5103   }
5104 
5105   /// Collection of all malloc calls in a function.
5106   SmallSetVector<Instruction *, 4> MallocCalls;
5107 
5108   /// Collection of malloc calls that cannot be converted.
5109   DenseSet<const Instruction *> BadMallocCalls;
5110 
5111   /// A map for each malloc call to the set of associated free calls.
5112   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5113 
5114   ChangeStatus updateImpl(Attributor &A) override;
5115 };
5116 
5117 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5118   const Function *F = getAnchorScope();
5119   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5120 
5121   MustBeExecutedContextExplorer &Explorer =
5122       A.getInfoCache().getMustBeExecutedContextExplorer();
5123 
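  // FreeCheck: a malloc may be converted if it has exactly one associated
  // free and that free is guaranteed to execute whenever the malloc does
  // (queried via the must-be-executed-context explorer above).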
5124   auto FreeCheck = [&](Instruction &I) {
5125     const auto &Frees = FreesForMalloc.lookup(&I);
5126     if (Frees.size() != 1)
5127       return false;
5128     Instruction *UniqueFree = *Frees.begin();
5129     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5130   };
5131 
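  // UsesCheck: conservatively scan all transitive uses of the allocation.
  // Roughly: loads and stores *into* the memory are fine, storing the pointer
  // itself escapes it, calls are tolerated only with nocapture + nofree
  // arguments, frees are recorded, GEPs/bitcasts are followed, and PHIs or
  // selects clear `MustUse` since the pointer may then be mixed with others.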
5132   auto UsesCheck = [&](Instruction &I) {
5133     bool ValidUsesOnly = true;
5134     bool MustUse = true;
5135     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5136       Instruction *UserI = cast<Instruction>(U.getUser());
5137       if (isa<LoadInst>(UserI))
5138         return true;
5139       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5140         if (SI->getValueOperand() == U.get()) {
5141           LLVM_DEBUG(dbgs()
5142                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5143           ValidUsesOnly = false;
5144         } else {
5145           // A store into the malloc'ed memory is fine.
5146         }
5147         return true;
5148       }
5149       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5150         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5151           return true;
5152         // Record the free call for this allocation.
5153         if (isFreeCall(UserI, TLI)) {
5154           if (MustUse) {
5155             FreesForMalloc[&I].insert(UserI);
5156           } else {
5157             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5158                               << *UserI << "\n");
5159             ValidUsesOnly = false;
5160           }
5161           return true;
5162         }
5163 
5164         unsigned ArgNo = CB->getArgOperandNo(&U);
5165 
5166         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5167             *this, IRPosition::callsite_argument(*CB, ArgNo),
5168             DepClassTy::REQUIRED);
5169 
5170         // If a callsite argument use is nofree, we are fine.
5171         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5172             *this, IRPosition::callsite_argument(*CB, ArgNo),
5173             DepClassTy::REQUIRED);
5174 
5175         if (!NoCaptureAA.isAssumedNoCapture() ||
5176             !ArgNoFreeAA.isAssumedNoFree()) {
5177           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5178           ValidUsesOnly = false;
5179         }
5180         return true;
5181       }
5182 
5183       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5184           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5185         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5186         Follow = true;
5187         return true;
5188       }
5189       // Unknown user for which we cannot track uses further (in a way that
5190       // makes sense).
5191       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5192       ValidUsesOnly = false;
5193       return true;
5194     };
5195     A.checkForAllUses(Pred, *this, I);
5196     return ValidUsesOnly;
5197   };
5198 
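  // MallocCallocCheck: only allocations with a known-constant size up to
  // MaxHeapToStackSize qualify (a value of -1 disables the limit); for calloc
  // the product num * size is checked with unsigned-overflow detection.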
5199   auto MallocCallocCheck = [&](Instruction &I) {
5200     if (BadMallocCalls.count(&I))
5201       return true;
5202 
5203     bool IsMalloc = isMallocLikeFn(&I, TLI);
5204     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5205     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5206     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5207       BadMallocCalls.insert(&I);
5208       return true;
5209     }
5210 
5211     if (IsMalloc) {
5212       if (MaxHeapToStackSize == -1) {
5213         if (UsesCheck(I) || FreeCheck(I)) {
5214           MallocCalls.insert(&I);
5215           return true;
5216         }
5217       }
5218       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5219         if (Size->getValue().ule(MaxHeapToStackSize))
5220           if (UsesCheck(I) || FreeCheck(I)) {
5221             MallocCalls.insert(&I);
5222             return true;
5223           }
5224     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5225       if (MaxHeapToStackSize == -1) {
5226         if (UsesCheck(I) || FreeCheck(I)) {
5227           MallocCalls.insert(&I);
5228           return true;
5229         }
5230       }
5231       // Only if the alignment and sizes are constant.
5232       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5233         if (Size->getValue().ule(MaxHeapToStackSize))
5234           if (UsesCheck(I) || FreeCheck(I)) {
5235             MallocCalls.insert(&I);
5236             return true;
5237           }
5238     } else if (IsCalloc) {
5239       if (MaxHeapToStackSize == -1) {
5240         if (UsesCheck(I) || FreeCheck(I)) {
5241           MallocCalls.insert(&I);
5242           return true;
5243         }
5244       }
5245       bool Overflow = false;
5246       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5247         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5248           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5249                   .ule(MaxHeapToStackSize))
5250             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5251               MallocCalls.insert(&I);
5252               return true;
5253             }
5254     }
5255 
5256     BadMallocCalls.insert(&I);
5257     return true;
5258   };
5259 
5260   size_t NumBadMallocs = BadMallocCalls.size();
5261 
5262   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5263 
5264   if (NumBadMallocs != BadMallocCalls.size())
5265     return ChangeStatus::CHANGED;
5266 
5267   return ChangeStatus::UNCHANGED;
5268 }
5269 
5270 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5271   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5272       : AAHeapToStackImpl(IRP, A) {}
5273 
5274   /// See AbstractAttribute::trackStatistics().
5275   void trackStatistics() const override {
5276     STATS_DECL(
5277         MallocCalls, Function,
5278         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5279     for (auto *C : MallocCalls)
5280       if (!BadMallocCalls.count(C))
5281         ++BUILD_STAT_NAME(MallocCalls, Function);
5282   }
5283 };
5284 
5285 /// ----------------------- Privatizable Pointers ------------------------------
5286 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5287   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5288       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5289 
5290   ChangeStatus indicatePessimisticFixpoint() override {
5291     AAPrivatizablePtr::indicatePessimisticFixpoint();
5292     PrivatizableType = nullptr;
5293     return ChangeStatus::CHANGED;
5294   }
5295 
5296   /// Identify the type we can choose for a private copy of the underlying
5297   /// argument. None means it is not clear yet, nullptr means there is none.
5298   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5299 
5300   /// Return a privatizable type that encloses both T0 and T1.
5301   /// TODO: This is merely a stub for now as we should manage a mapping as well.
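  /// As implemented below: combining None with T yields T, T with T yields T,
  /// and two distinct concrete types yield nullptr (not privatizable).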
5302   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5303     if (!T0.hasValue())
5304       return T1;
5305     if (!T1.hasValue())
5306       return T0;
5307     if (T0 == T1)
5308       return T0;
5309     return nullptr;
5310   }
5311 
5312   Optional<Type *> getPrivatizableType() const override {
5313     return PrivatizableType;
5314   }
5315 
5316   const std::string getAsStr() const override {
5317     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5318   }
5319 
5320 protected:
5321   Optional<Type *> PrivatizableType;
5322 };
5323 
5324 // TODO: Do this for call site arguments (probably also other values) as well.
5325 
5326 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5327   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5328       : AAPrivatizablePtrImpl(IRP, A) {}
5329 
5330   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5331   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5332     // If this is a byval argument and we know all the call sites (so we can
5333     // rewrite them), there is no need to check them explicitly.
5334     bool AllCallSitesKnown;
5335     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5336         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5337                                true, AllCallSitesKnown))
5338       return getAssociatedValue().getType()->getPointerElementType();
5339 
5340     Optional<Type *> Ty;
5341     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5342 
5343     // Make sure the associated call site argument has the same type at all call
5344     // sites and it is an allocation we know is safe to privatize, for now that
5345     // means we only allow alloca instructions.
5346     // TODO: We can additionally analyze the accesses in the callee to create
5347     //       the type from that information instead. That is a little more
5348     //       involved and will be done in a follow up patch.
5349     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5350       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5351       // Check if a corresponding argument was found or if it is one not
5352       // associated (which can happen for callback calls).
5353       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5354         return false;
5355 
5356       // Check that all call sites agree on a type.
5357       auto &PrivCSArgAA =
5358           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5359       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5360 
5361       LLVM_DEBUG({
5362         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5363         if (CSTy.hasValue() && CSTy.getValue())
5364           CSTy.getValue()->print(dbgs());
5365         else if (CSTy.hasValue())
5366           dbgs() << "<nullptr>";
5367         else
5368           dbgs() << "<none>";
5369       });
5370 
5371       Ty = combineTypes(Ty, CSTy);
5372 
5373       LLVM_DEBUG({
5374         dbgs() << " : New Type: ";
5375         if (Ty.hasValue() && Ty.getValue())
5376           Ty.getValue()->print(dbgs());
5377         else if (Ty.hasValue())
5378           dbgs() << "<nullptr>";
5379         else
5380           dbgs() << "<none>";
5381         dbgs() << "\n";
5382       });
5383 
5384       return !Ty.hasValue() || Ty.getValue();
5385     };
5386 
5387     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5388       return nullptr;
5389     return Ty;
5390   }
5391 
5392   /// See AbstractAttribute::updateImpl(...).
5393   ChangeStatus updateImpl(Attributor &A) override {
5394     PrivatizableType = identifyPrivatizableType(A);
5395     if (!PrivatizableType.hasValue())
5396       return ChangeStatus::UNCHANGED;
5397     if (!PrivatizableType.getValue())
5398       return indicatePessimisticFixpoint();
5399 
5400     // The dependence is optional so we don't give up once we give up on the
5401     // alignment.
5402     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5403                         DepClassTy::OPTIONAL);
5404 
5405     // Avoid arguments with padding for now.
5406     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5407         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5408                                                 A.getInfoCache().getDL())) {
5409       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5410       return indicatePessimisticFixpoint();
5411     }
5412 
5413     // Verify callee and caller agree on how the promoted argument would be
5414     // passed.
5415     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5416     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5417     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5418     Function &Fn = *getIRPosition().getAnchorScope();
5419     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5420     ArgsToPromote.insert(getAssociatedArgument());
5421     const auto *TTI =
5422         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5423     if (!TTI ||
5424         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5425             Fn, *TTI, ArgsToPromote, Dummy) ||
5426         ArgsToPromote.empty()) {
5427       LLVM_DEBUG(
5428           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5429                  << Fn.getName() << "\n");
5430       return indicatePessimisticFixpoint();
5431     }
5432 
5433     // Collect the types that will replace the privatizable type in the function
5434     // signature.
5435     SmallVector<Type *, 16> ReplacementTypes;
5436     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5437 
5438     // Register a rewrite of the argument.
5439     Argument *Arg = getAssociatedArgument();
5440     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5441       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5442       return indicatePessimisticFixpoint();
5443     }
5444 
5445     unsigned ArgNo = Arg->getArgNo();
5446 
5447     // Helper to check whether, for the given call site, the associated argument is
5448     // passed to a callback where the privatization would be different.
5449     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5450       SmallVector<const Use *, 4> CallbackUses;
5451       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5452       for (const Use *U : CallbackUses) {
5453         AbstractCallSite CBACS(U);
5454         assert(CBACS && CBACS.isCallbackCall());
5455         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5456           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5457 
5458           LLVM_DEBUG({
5459             dbgs()
5460                 << "[AAPrivatizablePtr] Argument " << *Arg
5461                 << "check if can be privatized in the context of its parent ("
5462                 << Arg->getParent()->getName()
5463                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5464                    "callback ("
5465                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5466                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5467                 << CBACS.getCallArgOperand(CBArg) << " vs "
5468                 << CB.getArgOperand(ArgNo) << "\n"
5469                 << "[AAPrivatizablePtr] " << CBArg << " : "
5470                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5471           });
5472 
5473           if (CBArgNo != int(ArgNo))
5474             continue;
5475           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5476               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5477           if (CBArgPrivAA.isValidState()) {
5478             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5479             if (!CBArgPrivTy.hasValue())
5480               continue;
5481             if (CBArgPrivTy.getValue() == PrivatizableType)
5482               continue;
5483           }
5484 
5485           LLVM_DEBUG({
5486             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5487                    << " cannot be privatized in the context of its parent ("
5488                    << Arg->getParent()->getName()
5489                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5490                       "callback ("
5491                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5492                    << ").\n[AAPrivatizablePtr] for which the argument "
5493                       "privatization is not compatible.\n";
5494           });
5495           return false;
5496         }
5497       }
5498       return true;
5499     };
5500 
5501     // Helper to check whether, for the given call site, the associated argument is
5502     // passed to a direct call where the privatization would be different.
5503     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5504       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5505       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5506       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5507              "Expected a direct call operand for callback call operand");
5508 
5509       LLVM_DEBUG({
5510         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5511                << " check if be privatized in the context of its parent ("
5512                << Arg->getParent()->getName()
5513                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5514                   "direct call of ("
5515                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5516                << ").\n";
5517       });
5518 
5519       Function *DCCallee = DC->getCalledFunction();
5520       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5521         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5522             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5523             DepClassTy::REQUIRED);
5524         if (DCArgPrivAA.isValidState()) {
5525           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5526           if (!DCArgPrivTy.hasValue())
5527             return true;
5528           if (DCArgPrivTy.getValue() == PrivatizableType)
5529             return true;
5530         }
5531       }
5532 
5533       LLVM_DEBUG({
5534         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5535                << " cannot be privatized in the context of its parent ("
5536                << Arg->getParent()->getName()
5537                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5538                   "direct call of ("
5539                << ACS.getInstruction()->getCalledFunction()->getName()
5540                << ").\n[AAPrivatizablePtr] for which the argument "
5541                   "privatization is not compatible.\n";
5542       });
5543       return false;
5544     };
5545 
5546     // Helper to check if the associated argument is used at the given abstract
5547     // call site in a way that is incompatible with the privatization assumed
5548     // here.
5549     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5550       if (ACS.isDirectCall())
5551         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5552       if (ACS.isCallbackCall())
5553         return IsCompatiblePrivArgOfDirectCS(ACS);
5554       return false;
5555     };
5556 
5557     bool AllCallSitesKnown;
5558     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5559                                 AllCallSitesKnown))
5560       return indicatePessimisticFixpoint();
5561 
5562     return ChangeStatus::UNCHANGED;
5563   }
5564 
5565   /// Given a type to privatize \p PrivType, collect the constituent types
5566   /// (which are used) in \p ReplacementTypes.
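  /// E.g. (illustrative): a struct { i32, i64 } expands to its element types
  /// i32 and i64, an array [4 x float] to four float entries, and any other
  /// type is passed through unchanged.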
5567   static void
5568   identifyReplacementTypes(Type *PrivType,
5569                            SmallVectorImpl<Type *> &ReplacementTypes) {
5570     // TODO: For now we expand the privatization type to the fullest which can
5571     //       lead to dead arguments that need to be removed later.
5572     assert(PrivType && "Expected privatizable type!");
5573 
5574     // Traverse the type, extract constituent types on the outermost level.
5575     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5576       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5577         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5578     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5579       ReplacementTypes.append(PrivArrayType->getNumElements(),
5580                               PrivArrayType->getElementType());
5581     } else {
5582       ReplacementTypes.push_back(PrivType);
5583     }
5584   }
5585 
5586   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5587   /// The values needed are taken from the arguments of \p F starting at
5588   /// position \p ArgNo.
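  /// E.g. (illustrative): for a struct { i32, i64 } this emits two GEP+store
  /// pairs that write the new scalar arguments ArgNo and ArgNo+1 of \p F into
  /// the corresponding fields of \p Base.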
5589   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5590                                    unsigned ArgNo, Instruction &IP) {
5591     assert(PrivType && "Expected privatizable type!");
5592 
5593     IRBuilder<NoFolder> IRB(&IP);
5594     const DataLayout &DL = F.getParent()->getDataLayout();
5595 
5596     // Traverse the type, build GEPs and stores.
5597     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5598       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5599       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5600         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5601         Value *Ptr =
5602             constructPointer(PointeeTy, PrivType, &Base,
5603                              PrivStructLayout->getElementOffset(u), IRB, DL);
5604         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5605       }
5606     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5607       Type *PointeeTy = PrivArrayType->getElementType();
5608       Type *PointeePtrTy = PointeeTy->getPointerTo();
5609       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5610       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5611         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5612                                       u * PointeeTySize, IRB, DL);
5613         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5614       }
5615     } else {
5616       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5617     }
5618   }
5619 
5620   /// Extract values from \p Base according to the type \p PrivType at the
5621   /// call position \p ACS. The values are appended to \p ReplacementValues.
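  /// E.g. (illustrative): for a struct { i32, i64 } this emits two GEP+load
  /// pairs before the call and appends the loaded i32 and i64 to
  /// \p ReplacementValues in field order.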
5622   void createReplacementValues(Align Alignment, Type *PrivType,
5623                                AbstractCallSite ACS, Value *Base,
5624                                SmallVectorImpl<Value *> &ReplacementValues) {
5625     assert(Base && "Expected base value!");
5626     assert(PrivType && "Expected privatizable type!");
5627     Instruction *IP = ACS.getInstruction();
5628 
5629     IRBuilder<NoFolder> IRB(IP);
5630     const DataLayout &DL = IP->getModule()->getDataLayout();
5631 
5632     if (Base->getType()->getPointerElementType() != PrivType)
5633       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5634                                                  "", ACS.getInstruction());
5635 
5636     // Traverse the type, build GEPs and loads.
5637     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5638       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5639       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5640         Type *PointeeTy = PrivStructType->getElementType(u);
5641         Value *Ptr =
5642             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5643                              PrivStructLayout->getElementOffset(u), IRB, DL);
5644         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5645         L->setAlignment(Alignment);
5646         ReplacementValues.push_back(L);
5647       }
5648     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5649       Type *PointeeTy = PrivArrayType->getElementType();
5650       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5651       Type *PointeePtrTy = PointeeTy->getPointerTo();
5652       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5653         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5654                                       u * PointeeTySize, IRB, DL);
5655         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5656         L->setAlignment(Alignment);
5657         ReplacementValues.push_back(L);
5658       }
5659     } else {
5660       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5661       L->setAlignment(Alignment);
5662       ReplacementValues.push_back(L);
5663     }
5664   }
5665 
5666   /// See AbstractAttribute::manifest(...)
5667   ChangeStatus manifest(Attributor &A) override {
5668     if (!PrivatizableType.hasValue())
5669       return ChangeStatus::UNCHANGED;
5670     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5671 
5672     // Collect all tail calls in the function as we cannot allow new allocas to
5673     // escape into tail recursion.
5674     // TODO: Be smarter about new allocas escaping into tail calls.
5675     SmallVector<CallInst *, 16> TailCalls;
5676     if (!A.checkForAllInstructions(
5677             [&](Instruction &I) {
5678               CallInst &CI = cast<CallInst>(I);
5679               if (CI.isTailCall())
5680                 TailCalls.push_back(&CI);
5681               return true;
5682             },
5683             *this, {Instruction::Call}))
5684       return ChangeStatus::UNCHANGED;
5685 
5686     Argument *Arg = getAssociatedArgument();
5687     // Query AAAlign attribute for alignment of associated argument to
5688     // determine the best alignment of loads.
5689     const auto &AlignAA =
5690         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5691 
5692     // Callback to repair the associated function. A new alloca is placed at the
5693     // beginning and initialized with the values passed through arguments. The
5694     // new alloca replaces the use of the old pointer argument.
5695     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5696         [=](const Attributor::ArgumentReplacementInfo &ARI,
5697             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5698           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5699           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5700           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5701                                            Arg->getName() + ".priv", IP);
5702           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5703                                ArgIt->getArgNo(), *IP);
5704 
5705           if (AI->getType() != Arg->getType())
5706             AI =
5707                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5708           Arg->replaceAllUsesWith(AI);
5709 
5710           for (CallInst *CI : TailCalls)
5711             CI->setTailCall(false);
5712         };
5713 
5714     // Callback to repair a call site of the associated function. The elements
5715     // of the privatizable type are loaded prior to the call and passed to the
5716     // new function version.
5717     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5718         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5719                       AbstractCallSite ACS,
5720                       SmallVectorImpl<Value *> &NewArgOperands) {
5721           // When no alignment is specified for the load instruction,
5722           // natural alignment is assumed.
5723           createReplacementValues(
5724               assumeAligned(AlignAA.getAssumedAlign()),
5725               PrivatizableType.getValue(), ACS,
5726               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5727               NewArgOperands);
5728         };
5729 
5730     // Collect the types that will replace the privatizable type in the function
5731     // signature.
5732     SmallVector<Type *, 16> ReplacementTypes;
5733     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5734 
5735     // Register a rewrite of the argument.
5736     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5737                                            std::move(FnRepairCB),
5738                                            std::move(ACSRepairCB)))
5739       return ChangeStatus::CHANGED;
5740     return ChangeStatus::UNCHANGED;
5741   }
5742 
5743   /// See AbstractAttribute::trackStatistics()
5744   void trackStatistics() const override {
5745     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5746   }
5747 };
5748 
5749 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5750   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5751       : AAPrivatizablePtrImpl(IRP, A) {}
5752 
5753   /// See AbstractAttribute::initialize(...).
5754   virtual void initialize(Attributor &A) override {
5755     // TODO: We can privatize more than arguments.
5756     indicatePessimisticFixpoint();
5757   }
5758 
5759   ChangeStatus updateImpl(Attributor &A) override {
5760     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5761                      "updateImpl will not be called");
5762   }
5763 
5764   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5765   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5766     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5767     if (!Obj) {
5768       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5769       return nullptr;
5770     }
5771 
5772     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5773       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5774         if (CI->isOne())
5775           return Obj->getType()->getPointerElementType();
5776     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5777       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5778           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5779       if (PrivArgAA.isAssumedPrivatizablePtr())
5780         return Obj->getType()->getPointerElementType();
5781     }
5782 
5783     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5784                          "alloca nor privatizable argument: "
5785                       << *Obj << "!\n");
5786     return nullptr;
5787   }
5788 
5789   /// See AbstractAttribute::trackStatistics()
5790   void trackStatistics() const override {
5791     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5792   }
5793 };
5794 
5795 struct AAPrivatizablePtrCallSiteArgument final
5796     : public AAPrivatizablePtrFloating {
5797   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5798       : AAPrivatizablePtrFloating(IRP, A) {}
5799 
5800   /// See AbstractAttribute::initialize(...).
5801   void initialize(Attributor &A) override {
5802     if (getIRPosition().hasAttr(Attribute::ByVal))
5803       indicateOptimisticFixpoint();
5804   }
5805 
5806   /// See AbstractAttribute::updateImpl(...).
5807   ChangeStatus updateImpl(Attributor &A) override {
5808     PrivatizableType = identifyPrivatizableType(A);
5809     if (!PrivatizableType.hasValue())
5810       return ChangeStatus::UNCHANGED;
5811     if (!PrivatizableType.getValue())
5812       return indicatePessimisticFixpoint();
5813 
5814     const IRPosition &IRP = getIRPosition();
5815     auto &NoCaptureAA =
5816         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5817     if (!NoCaptureAA.isAssumedNoCapture()) {
5818       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5819       return indicatePessimisticFixpoint();
5820     }
5821 
5822     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5823     if (!NoAliasAA.isAssumedNoAlias()) {
5824       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5825       return indicatePessimisticFixpoint();
5826     }
5827 
5828     const auto &MemBehaviorAA =
5829         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5830     if (!MemBehaviorAA.isAssumedReadOnly()) {
5831       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5832       return indicatePessimisticFixpoint();
5833     }
5834 
5835     return ChangeStatus::UNCHANGED;
5836   }
5837 
5838   /// See AbstractAttribute::trackStatistics()
5839   void trackStatistics() const override {
5840     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5841   }
5842 };
5843 
5844 struct AAPrivatizablePtrCallSiteReturned final
5845     : public AAPrivatizablePtrFloating {
5846   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5847       : AAPrivatizablePtrFloating(IRP, A) {}
5848 
5849   /// See AbstractAttribute::initialize(...).
5850   void initialize(Attributor &A) override {
5851     // TODO: We can privatize more than arguments.
5852     indicatePessimisticFixpoint();
5853   }
5854 
5855   /// See AbstractAttribute::trackStatistics()
5856   void trackStatistics() const override {
5857     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5858   }
5859 };
5860 
5861 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5862   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5863       : AAPrivatizablePtrFloating(IRP, A) {}
5864 
5865   /// See AbstractAttribute::initialize(...).
5866   void initialize(Attributor &A) override {
5867     // TODO: We can privatize more than arguments.
5868     indicatePessimisticFixpoint();
5869   }
5870 
5871   /// See AbstractAttribute::trackStatistics()
5872   void trackStatistics() const override {
5873     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5874   }
5875 };
5876 
5877 /// -------------------- Memory Behavior Attributes ----------------------------
5878 /// Includes read-none, read-only, and write-only.
5879 /// ----------------------------------------------------------------------------
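// The attributes form a small bit lattice over NO_READS and NO_WRITES:
// readnone corresponds to NO_ACCESSES (both bits), readonly to NO_WRITES, and
// writeonly to NO_READS. E.g. an instruction that may read but provably never
// writes memory is mapped to NO_WRITES, i.e. readonly.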
5880 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5881   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5882       : AAMemoryBehavior(IRP, A) {}
5883 
5884   /// See AbstractAttribute::initialize(...).
5885   void initialize(Attributor &A) override {
5886     intersectAssumedBits(BEST_STATE);
5887     getKnownStateFromValue(getIRPosition(), getState());
5888     AAMemoryBehavior::initialize(A);
5889   }
5890 
5891   /// Return the memory behavior information encoded in the IR for \p IRP.
5892   static void getKnownStateFromValue(const IRPosition &IRP,
5893                                      BitIntegerState &State,
5894                                      bool IgnoreSubsumingPositions = false) {
5895     SmallVector<Attribute, 2> Attrs;
5896     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5897     for (const Attribute &Attr : Attrs) {
5898       switch (Attr.getKindAsEnum()) {
5899       case Attribute::ReadNone:
5900         State.addKnownBits(NO_ACCESSES);
5901         break;
5902       case Attribute::ReadOnly:
5903         State.addKnownBits(NO_WRITES);
5904         break;
5905       case Attribute::WriteOnly:
5906         State.addKnownBits(NO_READS);
5907         break;
5908       default:
5909         llvm_unreachable("Unexpected attribute!");
5910       }
5911     }
5912 
5913     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5914       if (!I->mayReadFromMemory())
5915         State.addKnownBits(NO_READS);
5916       if (!I->mayWriteToMemory())
5917         State.addKnownBits(NO_WRITES);
5918     }
5919   }
5920 
5921   /// See AbstractAttribute::getDeducedAttributes(...).
5922   void getDeducedAttributes(LLVMContext &Ctx,
5923                             SmallVectorImpl<Attribute> &Attrs) const override {
5924     assert(Attrs.size() == 0);
5925     if (isAssumedReadNone())
5926       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5927     else if (isAssumedReadOnly())
5928       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5929     else if (isAssumedWriteOnly())
5930       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5931     assert(Attrs.size() <= 1);
5932   }
5933 
5934   /// See AbstractAttribute::manifest(...).
5935   ChangeStatus manifest(Attributor &A) override {
5936     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5937       return ChangeStatus::UNCHANGED;
5938 
5939     const IRPosition &IRP = getIRPosition();
5940 
5941     // Check if we would improve the existing attributes first.
5942     SmallVector<Attribute, 4> DeducedAttrs;
5943     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5944     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5945           return IRP.hasAttr(Attr.getKindAsEnum(),
5946                              /* IgnoreSubsumingPositions */ true);
5947         }))
5948       return ChangeStatus::UNCHANGED;
5949 
5950     // Clear existing attributes.
5951     IRP.removeAttrs(AttrKinds);
5952 
5953     // Use the generic manifest method.
5954     return IRAttribute::manifest(A);
5955   }
5956 
5957   /// See AbstractState::getAsStr().
5958   const std::string getAsStr() const override {
5959     if (isAssumedReadNone())
5960       return "readnone";
5961     if (isAssumedReadOnly())
5962       return "readonly";
5963     if (isAssumedWriteOnly())
5964       return "writeonly";
5965     return "may-read/write";
5966   }
5967 
5968   /// The set of IR attributes AAMemoryBehavior deals with.
5969   static const Attribute::AttrKind AttrKinds[3];
5970 };
5971 
5972 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5973     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5974 
5975 /// Memory behavior attribute for a floating value.
5976 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5977   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5978       : AAMemoryBehaviorImpl(IRP, A) {}
5979 
5980   /// See AbstractAttribute::initialize(...).
5981   void initialize(Attributor &A) override {
5982     AAMemoryBehaviorImpl::initialize(A);
5983     addUsesOf(A, getAssociatedValue());
5984   }
5985 
5986   /// See AbstractAttribute::updateImpl(...).
5987   ChangeStatus updateImpl(Attributor &A) override;
5988 
5989   /// See AbstractAttribute::trackStatistics()
5990   void trackStatistics() const override {
5991     if (isAssumedReadNone())
5992       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5993     else if (isAssumedReadOnly())
5994       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5995     else if (isAssumedWriteOnly())
5996       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5997   }
5998 
5999 private:
6000   /// Return true if users of \p UserI might access the underlying
6001   /// variable/location described by \p U and should therefore be analyzed.
6002   bool followUsersOfUseIn(Attributor &A, const Use *U,
6003                           const Instruction *UserI);
6004 
6005   /// Update the state according to the effect of use \p U in \p UserI.
6006   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6007 
6008 protected:
6009   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6010   void addUsesOf(Attributor &A, const Value &V);
6011 
6012   /// Container for (transitive) uses of the associated argument.
6013   SmallVector<const Use *, 8> Uses;
6014 
6015   /// Set to remember the uses we already traversed.
6016   SmallPtrSet<const Use *, 8> Visited;
6017 };
6018 
6019 /// Memory behavior attribute for function argument.
6020 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6021   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6022       : AAMemoryBehaviorFloating(IRP, A) {}
6023 
6024   /// See AbstractAttribute::initialize(...).
6025   void initialize(Attributor &A) override {
6026     intersectAssumedBits(BEST_STATE);
6027     const IRPosition &IRP = getIRPosition();
6028     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6029     // can query it when we use has/getAttr. That would allow us to reuse the
6030     // initialize of the base class here.
6031     bool HasByVal =
6032         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6033     getKnownStateFromValue(IRP, getState(),
6034                            /* IgnoreSubsumingPositions */ HasByVal);
6035 
6036     // Initialize the use vector with all direct uses of the associated value.
6037     Argument *Arg = getAssociatedArgument();
6038     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6039       indicatePessimisticFixpoint();
6040     } else {
6041       addUsesOf(A, *Arg);
6042     }
6043   }
6044 
6045   ChangeStatus manifest(Attributor &A) override {
6046     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6047     if (!getAssociatedValue().getType()->isPointerTy())
6048       return ChangeStatus::UNCHANGED;
6049 
6050     // TODO: From readattrs.ll: "inalloca parameters are always
6051     //                           considered written"
6052     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6053       removeKnownBits(NO_WRITES);
6054       removeAssumedBits(NO_WRITES);
6055     }
6056     return AAMemoryBehaviorFloating::manifest(A);
6057   }
6058 
6059   /// See AbstractAttribute::trackStatistics()
6060   void trackStatistics() const override {
6061     if (isAssumedReadNone())
6062       STATS_DECLTRACK_ARG_ATTR(readnone)
6063     else if (isAssumedReadOnly())
6064       STATS_DECLTRACK_ARG_ATTR(readonly)
6065     else if (isAssumedWriteOnly())
6066       STATS_DECLTRACK_ARG_ATTR(writeonly)
6067   }
6068 };
6069 
6070 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6071   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6072       : AAMemoryBehaviorArgument(IRP, A) {}
6073 
6074   /// See AbstractAttribute::initialize(...).
6075   void initialize(Attributor &A) override {
6076     // If we don't have an associated attribute this is either a variadic call
6077     // or an indirect call, either way, nothing to do here.
6078     Argument *Arg = getAssociatedArgument();
6079     if (!Arg) {
6080       indicatePessimisticFixpoint();
6081       return;
6082     }
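    // A byval argument is a caller-side copy: the callee can never write the
    // caller's memory through it (hence NO_WRITES is known), but setting up
    // the copy reads the pointee, so NO_READS must be dropped.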
6083     if (Arg->hasByValAttr()) {
6084       addKnownBits(NO_WRITES);
6085       removeKnownBits(NO_READS);
6086       removeAssumedBits(NO_READS);
6087     }
6088     AAMemoryBehaviorArgument::initialize(A);
6089     if (getAssociatedFunction()->isDeclaration())
6090       indicatePessimisticFixpoint();
6091   }
6092 
6093   /// See AbstractAttribute::updateImpl(...).
6094   ChangeStatus updateImpl(Attributor &A) override {
6095     // TODO: Once we have call site specific value information we can provide
6096     //       call site specific liveness information and then it makes
6097     //       sense to specialize attributes for call site arguments instead of
6098     //       redirecting requests to the callee argument.
6099     Argument *Arg = getAssociatedArgument();
6100     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6101     auto &ArgAA =
6102         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6103     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6104   }
6105 
6106   /// See AbstractAttribute::trackStatistics()
6107   void trackStatistics() const override {
6108     if (isAssumedReadNone())
6109       STATS_DECLTRACK_CSARG_ATTR(readnone)
6110     else if (isAssumedReadOnly())
6111       STATS_DECLTRACK_CSARG_ATTR(readonly)
6112     else if (isAssumedWriteOnly())
6113       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6114   }
6115 };
6116 
6117 /// Memory behavior attribute for a call site return position.
6118 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6119   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6120       : AAMemoryBehaviorFloating(IRP, A) {}
6121 
6122   /// See AbstractAttribute::initialize(...).
6123   void initialize(Attributor &A) override {
6124     AAMemoryBehaviorImpl::initialize(A);
6125     Function *F = getAssociatedFunction();
6126     if (!F || F->isDeclaration())
6127       indicatePessimisticFixpoint();
6128   }
6129 
6130   /// See AbstractAttribute::manifest(...).
6131   ChangeStatus manifest(Attributor &A) override {
6132     // We do not annotate returned values.
6133     return ChangeStatus::UNCHANGED;
6134   }
6135 
6136   /// See AbstractAttribute::trackStatistics()
6137   void trackStatistics() const override {}
6138 };
6139 
6140 /// An AA to represent the memory behavior function attributes.
6141 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6142   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6143       : AAMemoryBehaviorImpl(IRP, A) {}
6144 
6145   /// See AbstractAttribute::updateImpl(Attributor &A).
6146   virtual ChangeStatus updateImpl(Attributor &A) override;
6147 
6148   /// See AbstractAttribute::manifest(...).
6149   ChangeStatus manifest(Attributor &A) override {
6150     Function &F = cast<Function>(getAnchorValue());
6151     if (isAssumedReadNone()) {
6152       F.removeFnAttr(Attribute::ArgMemOnly);
6153       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6154       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6155     }
6156     return AAMemoryBehaviorImpl::manifest(A);
6157   }
6158 
6159   /// See AbstractAttribute::trackStatistics()
6160   void trackStatistics() const override {
6161     if (isAssumedReadNone())
6162       STATS_DECLTRACK_FN_ATTR(readnone)
6163     else if (isAssumedReadOnly())
6164       STATS_DECLTRACK_FN_ATTR(readonly)
6165     else if (isAssumedWriteOnly())
6166       STATS_DECLTRACK_FN_ATTR(writeonly)
6167   }
6168 };
6169 
6170 /// AAMemoryBehavior attribute for call sites.
6171 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6172   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6173       : AAMemoryBehaviorImpl(IRP, A) {}
6174 
6175   /// See AbstractAttribute::initialize(...).
6176   void initialize(Attributor &A) override {
6177     AAMemoryBehaviorImpl::initialize(A);
6178     Function *F = getAssociatedFunction();
6179     if (!F || F->isDeclaration())
6180       indicatePessimisticFixpoint();
6181   }
6182 
6183   /// See AbstractAttribute::updateImpl(...).
6184   ChangeStatus updateImpl(Attributor &A) override {
6185     // TODO: Once we have call site specific value information we can provide
6186     //       call site specific liveness information and then it makes
6187     //       sense to specialize attributes for call sites instead of
6188     //       redirecting requests to the callee.
6189     Function *F = getAssociatedFunction();
6190     const IRPosition &FnPos = IRPosition::function(*F);
6191     auto &FnAA =
6192         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6193     return clampStateAndIndicateChange(getState(), FnAA.getState());
6194   }
6195 
6196   /// See AbstractAttribute::trackStatistics()
6197   void trackStatistics() const override {
6198     if (isAssumedReadNone())
6199       STATS_DECLTRACK_CS_ATTR(readnone)
6200     else if (isAssumedReadOnly())
6201       STATS_DECLTRACK_CS_ATTR(readonly)
6202     else if (isAssumedWriteOnly())
6203       STATS_DECLTRACK_CS_ATTR(writeonly)
6204   }
6205 };
6206 
6207 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6208 
6209   // The current assumed state used to determine a change.
6210   auto AssumedState = getAssumed();
6211 
6212   auto CheckRWInst = [&](Instruction &I) {
6213     // If the instruction has its own memory behavior state, use it to restrict
6214     // the local state. No further analysis is required as the other memory
6215     // state is as optimistic as it gets.
6216     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6217       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6218           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6219       intersectAssumedBits(MemBehaviorAA.getAssumed());
6220       return !isAtFixpoint();
6221     }
6222 
6223     // Remove access kind modifiers if necessary.
6224     if (I.mayReadFromMemory())
6225       removeAssumedBits(NO_READS);
6226     if (I.mayWriteToMemory())
6227       removeAssumedBits(NO_WRITES);
6228     return !isAtFixpoint();
6229   };
6230 
6231   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6232     return indicatePessimisticFixpoint();
6233 
6234   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6235                                         : ChangeStatus::UNCHANGED;
6236 }
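// Sketch of the fixpoint logic above (hypothetical IR): a function whose only
// memory effect is a store keeps NO_READS but loses NO_WRITES and can thus be
// marked `writeonly`:
//
//   define void @g(i32* %p) {
//     store i32 0, i32* %p   ; mayWriteToMemory() -> removeAssumedBits(NO_WRITES)
//     ret void
//   }
//
// Call sites are instead handled by intersecting with the callee's
// AAMemoryBehavior state, which is already as optimistic as it gets.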
6237 
6238 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6239 
6240   const IRPosition &IRP = getIRPosition();
6241   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6242   AAMemoryBehavior::StateType &S = getState();
6243 
6244   // First, check the function scope. We take the known information and we avoid
6245   // work if the assumed information implies the current assumed information for
6246   // this attribute. This is valid for all but byval arguments.
6247   Argument *Arg = IRP.getAssociatedArgument();
6248   AAMemoryBehavior::base_t FnMemAssumedState =
6249       AAMemoryBehavior::StateType::getWorstState();
6250   if (!Arg || !Arg->hasByValAttr()) {
6251     const auto &FnMemAA =
6252         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6253     FnMemAssumedState = FnMemAA.getAssumed();
6254     S.addKnownBits(FnMemAA.getKnown());
6255     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6256       return ChangeStatus::UNCHANGED;
6257   }
6258 
6259   // Make sure the value is not captured (except through "return"). If it
6260   // is, any information derived would be irrelevant anyway as we cannot
6261   // check the potential aliases introduced by the capture. However, there is
6262   // no need to fall back to anything less optimistic than the function state.
6263   const auto &ArgNoCaptureAA =
6264       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6265   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6266     S.intersectAssumedBits(FnMemAssumedState);
6267     return ChangeStatus::CHANGED;
6268   }
6269 
6270   // The current assumed state used to determine a change.
6271   auto AssumedState = S.getAssumed();
6272 
6273   // Liveness information to exclude dead users.
6274   // TODO: Take the FnPos once we have call site specific liveness information.
6275   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6276       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6277       DepClassTy::NONE);
6278 
6279   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6280   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6281     const Use *U = Uses[i];
6282     Instruction *UserI = cast<Instruction>(U->getUser());
6283     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6284                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6285                       << "]\n");
6286     if (A.isAssumedDead(*U, this, &LivenessAA))
6287       continue;
6288 
6289     // Droppable users, e.g., llvm::assume, do not actually perform any action.
6290     if (UserI->isDroppable())
6291       continue;
6292 
6293     // Check if the users of UserI should also be visited.
6294     if (followUsersOfUseIn(A, U, UserI))
6295       addUsesOf(A, *UserI);
6296 
6297     // If UserI might touch memory we analyze the use in detail.
6298     if (UserI->mayReadOrWriteMemory())
6299       analyzeUseIn(A, U, UserI);
6300   }
6301 
6302   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6303                                         : ChangeStatus::UNCHANGED;
6304 }
6305 
6306 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6307   SmallVector<const Use *, 8> WL;
6308   for (const Use &U : V.uses())
6309     WL.push_back(&U);
6310 
6311   while (!WL.empty()) {
6312     const Use *U = WL.pop_back_val();
6313     if (!Visited.insert(U).second)
6314       continue;
6315 
6316     const Instruction *UserI = cast<Instruction>(U->getUser());
6317     if (UserI->mayReadOrWriteMemory()) {
6318       Uses.push_back(U);
6319       continue;
6320     }
6321     if (!followUsersOfUseIn(A, U, UserI))
6322       continue;
6323     for (const Use &UU : UserI->uses())
6324       WL.push_back(&UU);
6325   }
6326 }
6327 
6328 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6329                                                   const Instruction *UserI) {
6330   // The loaded value is unrelated to the pointer argument; no need to
6331   // follow the users of the load.
6332   if (isa<LoadInst>(UserI))
6333     return false;
6334 
6335   // By default we follow all uses assuming UserI might leak information on U;
6336   // we have special handling for call site operands though.
6337   const auto *CB = dyn_cast<CallBase>(UserI);
6338   if (!CB || !CB->isArgOperand(U))
6339     return true;
6340 
6341   // If the use is a call argument known not to be captured, the users of
6342   // the call do not need to be visited because they have to be unrelated to
6343   // the input. Note that this check is not trivial even though we disallow
6344   // general capturing of the underlying argument. The reason is that the
6345   // call might capture the argument "through return", which we allow and for
6346   // which we need to check call users.
6347   if (U->get()->getType()->isPointerTy()) {
6348     unsigned ArgNo = CB->getArgOperandNo(U);
6349     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6350         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6351     return !ArgNoCaptureAA.isAssumedNoCapture();
6352   }
6353 
6354   return true;
6355 }
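// Hypothetical example of the "capture through return" case handled above:
//
//   %q = call i8* @identity(i8* %p)  ; %p escapes only via the return value
//   store i8 0, i8* %q               ; must be visited: a write through %p
//
// Only if AANoCapture shows the argument is not captured at all can the users
// of the call be skipped.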
6356 
6357 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6358                                             const Instruction *UserI) {
6359   assert(UserI->mayReadOrWriteMemory());
6360 
6361   switch (UserI->getOpcode()) {
6362   default:
6363     // TODO: Handle all atomics and other side-effect operations we know of.
6364     break;
6365   case Instruction::Load:
6366     // Loads cause the NO_READS property to disappear.
6367     removeAssumedBits(NO_READS);
6368     return;
6369 
6370   case Instruction::Store:
6371     // Stores cause the NO_WRITES property to disappear if the use is the
6372     // pointer operand. Note that we do assume that capturing was taken care of
6373     // somewhere else.
6374     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6375       removeAssumedBits(NO_WRITES);
6376     return;
6377 
6378   case Instruction::Call:
6379   case Instruction::CallBr:
6380   case Instruction::Invoke: {
6381     // For call sites we look at the argument memory behavior attribute (this
6382     // could be recursive!) in order to restrict our own state.
6383     const auto *CB = cast<CallBase>(UserI);
6384 
6385     // Give up on operand bundles.
6386     if (CB->isBundleOperand(U)) {
6387       indicatePessimisticFixpoint();
6388       return;
6389     }
6390 
6391     // Calling a function does read the function pointer, and may write it if
6392     // the function is self-modifying.
6393     if (CB->isCallee(U)) {
6394       removeAssumedBits(NO_READS);
6395       break;
6396     }
6397 
6398     // Adjust the possible access behavior based on the information on the
6399     // argument.
6400     IRPosition Pos;
6401     if (U->get()->getType()->isPointerTy())
6402       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6403     else
6404       Pos = IRPosition::callsite_function(*CB);
6405     const auto &MemBehaviorAA =
6406         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6407     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6408     // and at least "known".
6409     intersectAssumedBits(MemBehaviorAA.getAssumed());
6410     return;
6411   }
6412   };
6413 
6414   // Generally, look at the "may-properties" and adjust the assumed state if we
6415   // did not trigger special handling before.
6416   if (UserI->mayReadFromMemory())
6417     removeAssumedBits(NO_READS);
6418   if (UserI->mayWriteToMemory())
6419     removeAssumedBits(NO_WRITES);
6420 }
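// Hypothetical example of the store case above: for `store i32 0, i32* %p` the
// NO_WRITES bit is removed only if %p is the pointer operand; passing %p as
// the *value* operand is a capture question handled elsewhere.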
6421 
6422 } // namespace
6423 
6424 /// -------------------- Memory Locations Attributes ---------------------------
6425 /// Includes readnone, argmemonly, inaccessiblememonly, and
6426 /// inaccessiblemem_or_argmemonly.
6427 /// ----------------------------------------------------------------------------
6428 
6429 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6430     AAMemoryLocation::MemoryLocationsKind MLK) {
6431   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6432     return "all memory";
6433   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6434     return "no memory";
6435   std::string S = "memory:";
6436   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6437     S += "stack,";
6438   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6439     S += "constant,";
6440   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6441     S += "internal global,";
6442   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6443     S += "external global,";
6444   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6445     S += "argument,";
6446   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6447     S += "inaccessible,";
6448   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6449     S += "malloced,";
6450   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6451     S += "unknown,";
6452   S.pop_back();
6453   return S;
6454 }
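// Usage sketch: MLK encodes the *excluded* locations, so the string lists the
// locations that may still be accessed. For example, an MLK containing only
// NO_ARGUMENT_MEM prints "memory:stack,constant,internal global,external
// global,inaccessible,malloced,unknown", NO_LOCATIONS prints "no memory", and
// an MLK with no exclusion bits prints "all memory".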
6455 
6456 namespace {
6457 struct AAMemoryLocationImpl : public AAMemoryLocation {
6458 
6459   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6460       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6461     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6462       AccessKind2Accesses[u] = nullptr;
6463   }
6464 
6465   ~AAMemoryLocationImpl() {
6466     // The AccessSets are allocated via a BumpPtrAllocator, we call
6467     // the destructor manually.
6468     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6469       if (AccessKind2Accesses[u])
6470         AccessKind2Accesses[u]->~AccessSet();
6471   }
6472 
6473   /// See AbstractAttribute::initialize(...).
6474   void initialize(Attributor &A) override {
6475     intersectAssumedBits(BEST_STATE);
6476     getKnownStateFromValue(A, getIRPosition(), getState());
6477     AAMemoryLocation::initialize(A);
6478   }
6479 
6480   /// Return the memory behavior information encoded in the IR for \p IRP.
6481   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6482                                      BitIntegerState &State,
6483                                      bool IgnoreSubsumingPositions = false) {
6484     // For internal functions we ignore `argmemonly` and
6485     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6486     // constant propagation. It is unclear if this is the best way but it is
6487     // unlikely this will cause real performance problems. If we are deriving
6488     // attributes for the anchor function we even remove the attribute in
6489     // addition to ignoring it.
6490     bool UseArgMemOnly = true;
6491     Function *AnchorFn = IRP.getAnchorScope();
6492     if (AnchorFn && A.isRunOn(*AnchorFn))
6493       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6494 
6495     SmallVector<Attribute, 2> Attrs;
6496     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6497     for (const Attribute &Attr : Attrs) {
6498       switch (Attr.getKindAsEnum()) {
6499       case Attribute::ReadNone:
6500         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6501         break;
6502       case Attribute::InaccessibleMemOnly:
6503         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6504         break;
6505       case Attribute::ArgMemOnly:
6506         if (UseArgMemOnly)
6507           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6508         else
6509           IRP.removeAttrs({Attribute::ArgMemOnly});
6510         break;
6511       case Attribute::InaccessibleMemOrArgMemOnly:
6512         if (UseArgMemOnly)
6513           State.addKnownBits(inverseLocation(
6514               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6515         else
6516           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6517         break;
6518       default:
6519         llvm_unreachable("Unexpected attribute!");
6520       }
6521     }
6522   }
6523 
6524   /// See AbstractAttribute::getDeducedAttributes(...).
6525   void getDeducedAttributes(LLVMContext &Ctx,
6526                             SmallVectorImpl<Attribute> &Attrs) const override {
6527     assert(Attrs.size() == 0);
6528     if (isAssumedReadNone()) {
6529       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6530     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6531       if (isAssumedInaccessibleMemOnly())
6532         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6533       else if (isAssumedArgMemOnly())
6534         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6535       else if (isAssumedInaccessibleOrArgMemOnly())
6536         Attrs.push_back(
6537             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6538     }
6539     assert(Attrs.size() <= 1);
6540   }
6541 
6542   /// See AbstractAttribute::manifest(...).
6543   ChangeStatus manifest(Attributor &A) override {
6544     const IRPosition &IRP = getIRPosition();
6545 
6546     // Check if we would improve the existing attributes first.
6547     SmallVector<Attribute, 4> DeducedAttrs;
6548     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6549     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6550           return IRP.hasAttr(Attr.getKindAsEnum(),
6551                              /* IgnoreSubsumingPositions */ true);
6552         }))
6553       return ChangeStatus::UNCHANGED;
6554 
6555     // Clear existing attributes.
6556     IRP.removeAttrs(AttrKinds);
6557     if (isAssumedReadNone())
6558       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6559 
6560     // Use the generic manifest method.
6561     return IRAttribute::manifest(A);
6562   }
6563 
6564   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6565   bool checkForAllAccessesToMemoryKind(
6566       function_ref<bool(const Instruction *, const Value *, AccessKind,
6567                         MemoryLocationsKind)>
6568           Pred,
6569       MemoryLocationsKind RequestedMLK) const override {
6570     if (!isValidState())
6571       return false;
6572 
6573     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6574     if (AssumedMLK == NO_LOCATIONS)
6575       return true;
6576 
6577     unsigned Idx = 0;
6578     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6579          CurMLK *= 2, ++Idx) {
6580       if (CurMLK & RequestedMLK)
6581         continue;
6582 
6583       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6584         for (const AccessInfo &AI : *Accesses)
6585           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6586             return false;
6587     }
6588 
6589     return true;
6590   }
6591 
6592   ChangeStatus indicatePessimisticFixpoint() override {
6593     // If we give up and indicate a pessimistic fixpoint this instruction will
6594     // become an access for all potential access kinds.
6595     // TODO: Add pointers for argmemonly and globals to improve the results of
6596     //       checkForAllAccessesToMemoryKind.
6597     bool Changed = false;
6598     MemoryLocationsKind KnownMLK = getKnown();
6599     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6600     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6601       if (!(CurMLK & KnownMLK))
6602         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6603                                   getAccessKindFromInst(I));
6604     return AAMemoryLocation::indicatePessimisticFixpoint();
6605   }
6606 
6607 protected:
6608   /// Helper struct to tie together an instruction that has a read or write
6609   /// effect with the pointer it accesses (if any).
6610   struct AccessInfo {
6611 
6612     /// The instruction that caused the access.
6613     const Instruction *I;
6614 
6615     /// The base pointer that is accessed, or null if unknown.
6616     const Value *Ptr;
6617 
6618     /// The kind of access (read/write/read+write).
6619     AccessKind Kind;
6620 
6621     bool operator==(const AccessInfo &RHS) const {
6622       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6623     }
6624     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6625       if (LHS.I != RHS.I)
6626         return LHS.I < RHS.I;
6627       if (LHS.Ptr != RHS.Ptr)
6628         return LHS.Ptr < RHS.Ptr;
6629       if (LHS.Kind != RHS.Kind)
6630         return LHS.Kind < RHS.Kind;
6631       return false;
6632     }
6633   };
6634 
6635   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6636   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6637   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6638   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6639 
6640   /// Categorize the pointer arguments of CB that might access memory in
6641   /// AccessedLoc and update the state and access map accordingly.
6642   void
6643   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6644                                      AAMemoryLocation::StateType &AccessedLocs,
6645                                      bool &Changed);
6646 
6647   /// Return the kind(s) of location that may be accessed by \p V.
6648   AAMemoryLocation::MemoryLocationsKind
6649   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6650 
6651   /// Return the access kind as determined by \p I.
6652   AccessKind getAccessKindFromInst(const Instruction *I) {
6653     AccessKind AK = READ_WRITE;
6654     if (I) {
6655       AK = I->mayReadFromMemory() ? READ : NONE;
6656       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6657     }
6658     return AK;
6659   }
6660 
6661   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6662   /// an access of kind \p AK to a \p MLK memory location with the access
6663   /// pointer \p Ptr.
6664   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6665                                  MemoryLocationsKind MLK, const Instruction *I,
6666                                  const Value *Ptr, bool &Changed,
6667                                  AccessKind AK = READ_WRITE) {
6668 
6669     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6670     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6671     if (!Accesses)
6672       Accesses = new (Allocator) AccessSet();
6673     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6674     State.removeAssumedBits(MLK);
6675   }
6676 
6677   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6678   /// arguments, and update the state and access map accordingly.
6679   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6680                           AAMemoryLocation::StateType &State, bool &Changed);
6681 
6682   /// Used to allocate access sets.
6683   BumpPtrAllocator &Allocator;
6684 
6685   /// The set of IR attributes AAMemoryLocation deals with.
6686   static const Attribute::AttrKind AttrKinds[4];
6687 };
6688 
6689 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6690     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6691     Attribute::InaccessibleMemOrArgMemOnly};
6692 
6693 void AAMemoryLocationImpl::categorizePtrValue(
6694     Attributor &A, const Instruction &I, const Value &Ptr,
6695     AAMemoryLocation::StateType &State, bool &Changed) {
6696   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6697                     << Ptr << " ["
6698                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6699 
6700   auto StripGEPCB = [](Value *V) -> Value * {
6701     auto *GEP = dyn_cast<GEPOperator>(V);
6702     while (GEP) {
6703       V = GEP->getPointerOperand();
6704       GEP = dyn_cast<GEPOperator>(V);
6705     }
6706     return V;
6707   };
6708 
6709   auto VisitValueCB = [&](Value &V, const Instruction *,
6710                           AAMemoryLocation::StateType &T,
6711                           bool Stripped) -> bool {
6712     // TODO: recognize the TBAA used for constant accesses.
6713     MemoryLocationsKind MLK = NO_LOCATIONS;
6714     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6715     if (isa<UndefValue>(V))
6716       return true;
6717     if (auto *Arg = dyn_cast<Argument>(&V)) {
6718       if (Arg->hasByValAttr())
6719         MLK = NO_LOCAL_MEM;
6720       else
6721         MLK = NO_ARGUMENT_MEM;
6722     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6723       // Reading constant memory is not treated as a read "effect" by the
6724       // function attr pass so we won't either. Constants defined by TBAA are
6725       // similar. (We know we do not write it because it is constant.)
6726       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6727         if (GVar->isConstant())
6728           return true;
6729 
6730       if (GV->hasLocalLinkage())
6731         MLK = NO_GLOBAL_INTERNAL_MEM;
6732       else
6733         MLK = NO_GLOBAL_EXTERNAL_MEM;
6734     } else if (isa<ConstantPointerNull>(V) &&
6735                !NullPointerIsDefined(getAssociatedFunction(),
6736                                      V.getType()->getPointerAddressSpace())) {
6737       return true;
6738     } else if (isa<AllocaInst>(V)) {
6739       MLK = NO_LOCAL_MEM;
6740     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6741       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6742           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6743       if (NoAliasAA.isAssumedNoAlias())
6744         MLK = NO_MALLOCED_MEM;
6745       else
6746         MLK = NO_UNKOWN_MEM;
6747     } else {
6748       MLK = NO_UNKOWN_MEM;
6749     }
6750 
6751     assert(MLK != NO_LOCATIONS && "No location specified!");
6752     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6753                               getAccessKindFromInst(&I));
6754     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: "
6755                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6756                       << "\n");
6757     return true;
6758   };
6759 
6760   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6761           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6762           /* UseValueSimplify */ true,
6763           /* MaxValues */ 32, StripGEPCB)) {
6764     LLVM_DEBUG(
6765         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6766     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6767                               getAccessKindFromInst(&I));
6768   } else {
6769     LLVM_DEBUG(
6770         dbgs()
6771         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6772         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6773   }
6774 }
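// Sketch of the categorization above on hypothetical base pointers (GEPs are
// stripped first via StripGEPCB, so only base pointers reach VisitValueCB):
//
//   %a = alloca i32               ; -> NO_LOCAL_MEM (stack)
//   @g = global i32 0             ; -> NO_GLOBAL_EXTERNAL_MEM
//   @c = constant i32 7           ; -> skipped, constant memory is no "effect"
//   %m = call i8* @malloc(i64 8)  ; -> NO_MALLOCED_MEM if noalias is assumed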
6775 
6776 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6777     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6778     bool &Changed) {
6779   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6780 
6781     // Skip non-pointer arguments.
6782     const Value *ArgOp = CB.getArgOperand(ArgNo);
6783     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6784       continue;
6785 
6786     // Skip readnone arguments.
6787     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6788     const auto &ArgOpMemLocationAA =
6789         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6790 
6791     if (ArgOpMemLocationAA.isAssumedReadNone())
6792       continue;
6793 
6794     // Categorize potentially accessed pointer arguments as if there was an
6795     // access instruction with them as pointer.
6796     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6797   }
6798 }
6799 
6800 AAMemoryLocation::MemoryLocationsKind
6801 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6802                                                   bool &Changed) {
6803   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6804                     << I << "\n");
6805 
6806   AAMemoryLocation::StateType AccessedLocs;
6807   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6808 
6809   if (auto *CB = dyn_cast<CallBase>(&I)) {
6810 
6811     // First check if we assume any accessed memory is visible.
6812     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6813         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6814     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6815                       << " [" << CBMemLocationAA << "]\n");
6816 
6817     if (CBMemLocationAA.isAssumedReadNone())
6818       return NO_LOCATIONS;
6819 
6820     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6821       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6822                                 Changed, getAccessKindFromInst(&I));
6823       return AccessedLocs.getAssumed();
6824     }
6825 
6826     uint32_t CBAssumedNotAccessedLocs =
6827         CBMemLocationAA.getAssumedNotAccessedLocation();
6828 
6829     // Set the argmemonly and global bits as we handle them separately below.
6830     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6831         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6832 
6833     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6834       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6835         continue;
6836       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6837                                 getAccessKindFromInst(&I));
6838     }
6839 
6840     // Now handle global memory if it might be accessed. This is slightly tricky
6841     // as NO_GLOBAL_MEM has multiple bits set.
6842     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6843     if (HasGlobalAccesses) {
6844       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6845                             AccessKind Kind, MemoryLocationsKind MLK) {
6846         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6847                                   getAccessKindFromInst(&I));
6848         return true;
6849       };
6850       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6851               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6852         return AccessedLocs.getWorstState();
6853     }
6854 
6855     LLVM_DEBUG(
6856         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6857                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6858 
6859     // Now handle argument memory if it might be accessed.
6860     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6861     if (HasArgAccesses)
6862       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6863 
6864     LLVM_DEBUG(
6865         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6866                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6867 
6868     return AccessedLocs.getAssumed();
6869   }
6870 
6871   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6872     LLVM_DEBUG(
6873         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6874                << I << " [" << *Ptr << "]\n");
6875     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6876     return AccessedLocs.getAssumed();
6877   }
6878 
6879   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6880                     << I << "\n");
6881   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6882                             getAccessKindFromInst(&I));
6883   return AccessedLocs.getAssumed();
6884 }
6885 
6886 /// An AA to represent the memory behavior function attributes.
6887 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6888   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6889       : AAMemoryLocationImpl(IRP, A) {}
6890 
6891   /// See AbstractAttribute::updateImpl(Attributor &A).
6892   virtual ChangeStatus updateImpl(Attributor &A) override {
6893 
6894     const auto &MemBehaviorAA =
6895         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6896     if (MemBehaviorAA.isAssumedReadNone()) {
6897       if (MemBehaviorAA.isKnownReadNone())
6898         return indicateOptimisticFixpoint();
6899       assert(isAssumedReadNone() &&
6900              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6901       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6902       return ChangeStatus::UNCHANGED;
6903     }
6904 
6905     // The current assumed state used to determine a change.
6906     auto AssumedState = getAssumed();
6907     bool Changed = false;
6908 
6909     auto CheckRWInst = [&](Instruction &I) {
6910       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6911       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6912                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6913       removeAssumedBits(inverseLocation(MLK, false, false));
6914       // Stop once only the valid bit is set in the *not assumed location*,
6915       // i.e., once we don't actually exclude any memory locations in the state.
6916       return getAssumedNotAccessedLocation() != VALID_STATE;
6917     };
6918 
6919     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6920       return indicatePessimisticFixpoint();
6921 
6922     Changed |= AssumedState != getAssumed();
6923     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6924   }
6925 
6926   /// See AbstractAttribute::trackStatistics()
6927   void trackStatistics() const override {
6928     if (isAssumedReadNone())
6929       STATS_DECLTRACK_FN_ATTR(readnone)
6930     else if (isAssumedArgMemOnly())
6931       STATS_DECLTRACK_FN_ATTR(argmemonly)
6932     else if (isAssumedInaccessibleMemOnly())
6933       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6934     else if (isAssumedInaccessibleOrArgMemOnly())
6935       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6936   }
6937 };
6938 
6939 /// AAMemoryLocation attribute for call sites.
6940 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6941   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6942       : AAMemoryLocationImpl(IRP, A) {}
6943 
6944   /// See AbstractAttribute::initialize(...).
6945   void initialize(Attributor &A) override {
6946     AAMemoryLocationImpl::initialize(A);
6947     Function *F = getAssociatedFunction();
6948     if (!F || F->isDeclaration())
6949       indicatePessimisticFixpoint();
6950   }
6951 
6952   /// See AbstractAttribute::updateImpl(...).
6953   ChangeStatus updateImpl(Attributor &A) override {
6954     // TODO: Once we have call site specific value information we can provide
6955     //       call site specific liveness information and then it makes
6956     //       sense to specialize attributes for call sites instead of
6957     //       redirecting requests to the callee.
6958     Function *F = getAssociatedFunction();
6959     const IRPosition &FnPos = IRPosition::function(*F);
6960     auto &FnAA =
6961         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6962     bool Changed = false;
6963     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6964                           AccessKind Kind, MemoryLocationsKind MLK) {
6965       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6966                                 getAccessKindFromInst(I));
6967       return true;
6968     };
6969     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6970       return indicatePessimisticFixpoint();
6971     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6972   }
6973 
6974   /// See AbstractAttribute::trackStatistics()
6975   void trackStatistics() const override {
6976     if (isAssumedReadNone())
6977       STATS_DECLTRACK_CS_ATTR(readnone)
6978   }
6979 };
6980 
6981 /// ------------------ Value Constant Range Attribute -------------------------
6982 
6983 struct AAValueConstantRangeImpl : AAValueConstantRange {
6984   using StateType = IntegerRangeState;
6985   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6986       : AAValueConstantRange(IRP, A) {}
6987 
6988   /// See AbstractAttribute::getAsStr().
6989   const std::string getAsStr() const override {
6990     std::string Str;
6991     llvm::raw_string_ostream OS(Str);
6992     OS << "range(" << getBitWidth() << ")<";
6993     getKnown().print(OS);
6994     OS << " / ";
6995     getAssumed().print(OS);
6996     OS << ">";
6997     return OS.str();
6998   }
6999 
7000   /// Helper function to get a SCEV expr for the associated value at program
7001   /// point \p I.
7002   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7003     if (!getAnchorScope())
7004       return nullptr;
7005 
7006     ScalarEvolution *SE =
7007         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7008             *getAnchorScope());
7009 
7010     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7011         *getAnchorScope());
7012 
7013     if (!SE || !LI)
7014       return nullptr;
7015 
7016     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7017     if (!I)
7018       return S;
7019 
7020     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7021   }
7022 
7023   /// Helper function to get a range from SCEV for the associated value at
7024   /// program point \p I.
7025   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7026                                          const Instruction *I = nullptr) const {
7027     if (!getAnchorScope())
7028       return getWorstState(getBitWidth());
7029 
7030     ScalarEvolution *SE =
7031         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7032             *getAnchorScope());
7033 
7034     const SCEV *S = getSCEV(A, I);
7035     if (!SE || !S)
7036       return getWorstState(getBitWidth());
7037 
7038     return SE->getUnsignedRange(S);
7039   }
7040 
7041   /// Helper function to get a range from LVI for the associated value at
7042   /// program point \p I.
7043   ConstantRange
7044   getConstantRangeFromLVI(Attributor &A,
7045                           const Instruction *CtxI = nullptr) const {
7046     if (!getAnchorScope())
7047       return getWorstState(getBitWidth());
7048 
7049     LazyValueInfo *LVI =
7050         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7051             *getAnchorScope());
7052 
7053     if (!LVI || !CtxI)
7054       return getWorstState(getBitWidth());
7055     return LVI->getConstantRange(&getAssociatedValue(),
7056                                  const_cast<Instruction *>(CtxI));
7057   }
7058 
7059   /// See AAValueConstantRange::getKnownConstantRange(..).
7060   ConstantRange
7061   getKnownConstantRange(Attributor &A,
7062                         const Instruction *CtxI = nullptr) const override {
7063     if (!CtxI || CtxI == getCtxI())
7064       return getKnown();
7065 
7066     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7067     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7068     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7069   }
7070 
7071   /// See AAValueConstantRange::getAssumedConstantRange(..).
7072   ConstantRange
7073   getAssumedConstantRange(Attributor &A,
7074                           const Instruction *CtxI = nullptr) const override {
7075     // TODO: Make SCEV use Attributor assumption.
7076     //       We may be able to bound a variable range via assumptions in
7077     //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known to
7078     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7079 
7080     if (!CtxI || CtxI == getCtxI())
7081       return getAssumed();
7082 
7083     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7084     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7085     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7086   }
7087 
7088   /// See AbstractAttribute::initialize(..).
7089   void initialize(Attributor &A) override {
7090     // Intersect a range given by SCEV.
7091     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7092 
7093     // Intersect a range given by LVI.
7094     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7095   }
7096 
7097   /// Helper function to create MDNode for range metadata.
7098   static MDNode *
7099   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7100                             const ConstantRange &AssumedConstantRange) {
7101     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7102                                   Ty, AssumedConstantRange.getLower())),
7103                               ConstantAsMetadata::get(ConstantInt::get(
7104                                   Ty, AssumedConstantRange.getUpper()))};
7105     return MDNode::get(Ctx, LowAndHigh);
7106   }
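  // For example (hypothetical), an assumed range [0, 10) on an i32 value
  // becomes the node `!{i32 0, i32 10}`, attached as `!range` metadata by the
  // manifest step below.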
7107 
7108   /// Return true if \p Assumed is included in \p KnownRanges.
7109   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7110 
7111     if (Assumed.isFullSet())
7112       return false;
7113 
7114     if (!KnownRanges)
7115       return true;
7116 
7117     // If multiple ranges are annotated in the IR, we give up annotating the
7118     // assumed range for now.
7119 
7120     // TODO: If there exists a known range which contains the assumed range, we
7121     // can say the assumed range is better.
7122     if (KnownRanges->getNumOperands() > 2)
7123       return false;
7124 
7125     ConstantInt *Lower =
7126         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7127     ConstantInt *Upper =
7128         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7129 
7130     ConstantRange Known(Lower->getValue(), Upper->getValue());
7131     return Known.contains(Assumed) && Known != Assumed;
7132   }
7133 
7134   /// Helper function to set range metadata.
7135   static bool
7136   setRangeMetadataIfisBetterRange(Instruction *I,
7137                                   const ConstantRange &AssumedConstantRange) {
7138     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7139     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7140       if (!AssumedConstantRange.isEmptySet()) {
7141         I->setMetadata(LLVMContext::MD_range,
7142                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7143                                                  AssumedConstantRange));
7144         return true;
7145       }
7146     }
7147     return false;
7148   }
7149 
7150   /// See AbstractAttribute::manifest()
7151   ChangeStatus manifest(Attributor &A) override {
7152     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7153     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7154     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7155 
7156     auto &V = getAssociatedValue();
7157     if (!AssumedConstantRange.isEmptySet() &&
7158         !AssumedConstantRange.isSingleElement()) {
7159       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7160         assert(I == getCtxI() && "Should not annotate an instruction which is "
7161                                  "not the context instruction");
7162         if (isa<CallInst>(I) || isa<LoadInst>(I))
7163           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7164             Changed = ChangeStatus::CHANGED;
7165       }
7166     }
7167 
7168     return Changed;
7169   }
7170 };
7171 
7172 struct AAValueConstantRangeArgument final
7173     : AAArgumentFromCallSiteArguments<
7174           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7175           true /* BridgeCallBaseContext */> {
7176   using Base = AAArgumentFromCallSiteArguments<
7177       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7178       true /* BridgeCallBaseContext */>;
7179   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7180       : Base(IRP, A) {}
7181 
7182   /// See AbstractAttribute::initialize(..).
7183   void initialize(Attributor &A) override {
7184     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7185       indicatePessimisticFixpoint();
7186     } else {
7187       Base::initialize(A);
7188     }
7189   }
7190 
7191   /// See AbstractAttribute::trackStatistics()
7192   void trackStatistics() const override {
7193     STATS_DECLTRACK_ARG_ATTR(value_range)
7194   }
7195 };
7196 
7197 struct AAValueConstantRangeReturned
7198     : AAReturnedFromReturnedValues<AAValueConstantRange,
7199                                    AAValueConstantRangeImpl,
7200                                    AAValueConstantRangeImpl::StateType,
7201                                    /* PropogateCallBaseContext */ true> {
7202   using Base =
7203       AAReturnedFromReturnedValues<AAValueConstantRange,
7204                                    AAValueConstantRangeImpl,
7205                                    AAValueConstantRangeImpl::StateType,
7206                                    /* PropogateCallBaseContext */ true>;
7207   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7208       : Base(IRP, A) {}
7209 
7210   /// See AbstractAttribute::initialize(...).
7211   void initialize(Attributor &A) override {}
7212 
7213   /// See AbstractAttribute::trackStatistics()
7214   void trackStatistics() const override {
7215     STATS_DECLTRACK_FNRET_ATTR(value_range)
7216   }
7217 };
7218 
7219 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7220   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7221       : AAValueConstantRangeImpl(IRP, A) {}
7222 
7223   /// See AbstractAttribute::initialize(...).
7224   void initialize(Attributor &A) override {
7225     AAValueConstantRangeImpl::initialize(A);
7226     Value &V = getAssociatedValue();
7227 
7228     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7229       unionAssumed(ConstantRange(C->getValue()));
7230       indicateOptimisticFixpoint();
7231       return;
7232     }
7233 
7234     if (isa<UndefValue>(&V)) {
7235       // Collapse the undef state to 0.
7236       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7237       indicateOptimisticFixpoint();
7238       return;
7239     }
7240 
7241     if (isa<CallBase>(&V))
7242       return;
7243 
7244     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7245       return;
7246     // If it is a load instruction with range metadata, use it.
7247     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7248       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7249         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7250         return;
7251       }
7252 
7253     // We can work with PHI and select instructions as we traverse their operands
7254     // during update.
7255     if (isa<SelectInst>(V) || isa<PHINode>(V))
7256       return;
7257 
7258     // Otherwise we give up.
7259     indicatePessimisticFixpoint();
7260 
7261     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7262                       << getAssociatedValue() << "\n");
7263   }
7264 
7265   bool calculateBinaryOperator(
7266       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7267       const Instruction *CtxI,
7268       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7269     Value *LHS = BinOp->getOperand(0);
7270     Value *RHS = BinOp->getOperand(1);
7271     // TODO: Allow non integers as well.
7272     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7273       return false;
7274 
7275     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7276         *this, IRPosition::value(*LHS, getCallBaseContext()),
7277         DepClassTy::REQUIRED);
7278     QuerriedAAs.push_back(&LHSAA);
7279     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7280 
7281     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7282         *this, IRPosition::value(*RHS, getCallBaseContext()),
7283         DepClassTy::REQUIRED);
7284     QuerriedAAs.push_back(&RHSAA);
7285     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7286 
7287     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7288 
7289     T.unionAssumed(AssumedRange);
7290 
7291     // TODO: Track a known state too.
7292 
7293     return T.isValidState();
7294   }
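  // Worked example (hypothetical ranges): for `%r = add i32 %x, %y` with %x
  // assumed in [0, 2) and %y assumed in [10, 11), ConstantRange::binaryOp
  // yields [10, 12), which is unioned into the state of %r.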
7295 
7296   bool calculateCastInst(
7297       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7298       const Instruction *CtxI,
7299       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7300     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7301     // TODO: Allow non integers as well.
7302     Value &OpV = *CastI->getOperand(0);
7303     if (!OpV.getType()->isIntegerTy())
7304       return false;
7305 
7306     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7307         *this, IRPosition::value(OpV, getCallBaseContext()),
7308         DepClassTy::REQUIRED);
7309     QuerriedAAs.push_back(&OpAA);
7310     T.unionAssumed(
7311         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7312     return T.isValidState();
7313   }
7314 
7315   bool
7316   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7317                    const Instruction *CtxI,
7318                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7319     Value *LHS = CmpI->getOperand(0);
7320     Value *RHS = CmpI->getOperand(1);
7321     // TODO: Allow non integers as well.
7322     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7323       return false;
7324 
7325     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7326         *this, IRPosition::value(*LHS, getCallBaseContext()),
7327         DepClassTy::REQUIRED);
7328     QuerriedAAs.push_back(&LHSAA);
7329     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7330         *this, IRPosition::value(*RHS, getCallBaseContext()),
7331         DepClassTy::REQUIRED);
7332     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7333     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7334 
7335     // If one of them is empty set, we can't decide.
7336     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7337       return true;
7338 
7339     bool MustTrue = false, MustFalse = false;
7340 
7341     auto AllowedRegion =
7342         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7343 
7344     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7345       MustFalse = true;
7346 
7347     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7348       MustTrue = true;
7349 
7350     assert((!MustTrue || !MustFalse) &&
7351            "Either MustTrue or MustFalse should be false!");
7352 
7353     if (MustTrue)
7354       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7355     else if (MustFalse)
7356       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7357     else
7358       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7359 
7360     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7361                       << " " << RHSAA << "\n");
7362 
7363     // TODO: Track a known state too.
7364     return T.isValidState();
7365   }
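  // Worked example (illustrative, not part of the original source): for
  // `icmp ult %a, %b` with %a in [0, 5) and %b in [10, 20),
  // ConstantRange::icmp holds for every pair, so the i1 result range
  // collapses to {1}. With %a in [30, 40) instead, the allowed region for
  // ICMP_ULT against [10, 20) is [0, 19), which does not intersect [30, 40),
  // so the result collapses to {0}. Otherwise the full i1 range is assumed.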
7366 
7367   /// See AbstractAttribute::updateImpl(...).
7368   ChangeStatus updateImpl(Attributor &A) override {
7369     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7370                             IntegerRangeState &T, bool Stripped) -> bool {
7371       Instruction *I = dyn_cast<Instruction>(&V);
7372       if (!I || isa<CallBase>(I)) {
7373 
7374         // If the value is not an instruction, we query the Attributor for the AA.
7375         const auto &AA = A.getAAFor<AAValueConstantRange>(
7376             *this, IRPosition::value(V, getCallBaseContext()),
7377             DepClassTy::REQUIRED);
7378 
7379         // We do not use the clamp operator here so that we can query the
7380         // range at the program point CtxI.
7380         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7381 
7382         return T.isValidState();
7383       }
7384 
7385       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7386       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7387         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7388           return false;
7389       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7390         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7391           return false;
7392       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7393         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7394           return false;
7395       } else {
7396         // Give up on other instructions.
7397         // TODO: Add other instructions
7398 
7399         T.indicatePessimisticFixpoint();
7400         return false;
7401       }
7402 
7403       // Catch circular reasoning in a pessimistic way for now.
7404       // TODO: Check how the range evolves and if we stripped anything, see also
7405       //       AADereferenceable or AAAlign for similar situations.
7406       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7407         if (QueriedAA != this)
7408           continue;
7409         // If we are in a steady state, we do not need to worry.
7410         if (T.getAssumed() == getState().getAssumed())
7411           continue;
7412         T.indicatePessimisticFixpoint();
7413       }
7414 
7415       return T.isValidState();
7416     };
7417 
7418     IntegerRangeState T(getBitWidth());
7419 
7420     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7421             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7422             /* UseValueSimplify */ false))
7423       return indicatePessimisticFixpoint();
7424 
7425     return clampStateAndIndicateChange(getState(), T);
7426   }
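  // For illustration (not in the original source): genericValueTraversal
  // walks the value graph, e.g. through PHIs and selects, and applies the
  // callback above to each underlying value. The accumulated state T is then
  // merged via clampStateAndIndicateChange, which reports CHANGED only when
  // the assumed state actually differs from the previous one.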
7427 
7428   /// See AbstractAttribute::trackStatistics()
7429   void trackStatistics() const override {
7430     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7431   }
7432 };
7433 
7434 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7435   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7436       : AAValueConstantRangeImpl(IRP, A) {}
7437 
7438   /// See AbstractAttribute::updateImpl(...).
7439   ChangeStatus updateImpl(Attributor &A) override {
7440     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7441                      "not be called");
7442   }
7443 
7444   /// See AbstractAttribute::trackStatistics()
7445   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7446 };
7447 
7448 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7449   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7450       : AAValueConstantRangeFunction(IRP, A) {}
7451 
7452   /// See AbstractAttribute::trackStatistics()
7453   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7454 };
7455 
7456 struct AAValueConstantRangeCallSiteReturned
7457     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7458                                      AAValueConstantRangeImpl,
7459                                      AAValueConstantRangeImpl::StateType,
7460                                      /* IntroduceCallBaseContext */ true> {
7461   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7462       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7463                                        AAValueConstantRangeImpl,
7464                                        AAValueConstantRangeImpl::StateType,
7465                                        /* IntroduceCallBaseContext */ true>(IRP,
7466                                                                             A) {
7467   }
7468 
7469   /// See AbstractAttribute::initialize(...).
7470   void initialize(Attributor &A) override {
7471     // If it is a call instruction with range metadata, use the metadata.
7472     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7473       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7474         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7475 
7476     AAValueConstantRangeImpl::initialize(A);
7477   }
7478 
7479   /// See AbstractAttribute::trackStatistics()
7480   void trackStatistics() const override {
7481     STATS_DECLTRACK_CSRET_ATTR(value_range)
7482   }
7483 };
7484 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7485   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7486       : AAValueConstantRangeFloating(IRP, A) {}
7487 
7488   /// See AbstractAttribute::manifest()
7489   ChangeStatus manifest(Attributor &A) override {
7490     return ChangeStatus::UNCHANGED;
7491   }
7492 
7493   /// See AbstractAttribute::trackStatistics()
7494   void trackStatistics() const override {
7495     STATS_DECLTRACK_CSARG_ATTR(value_range)
7496   }
7497 };
7498 
7499 /// ------------------ Potential Values Attribute -------------------------
7500 
7501 struct AAPotentialValuesImpl : AAPotentialValues {
7502   using StateType = PotentialConstantIntValuesState;
7503 
7504   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7505       : AAPotentialValues(IRP, A) {}
7506 
7507   /// See AbstractAttribute::getAsStr().
7508   const std::string getAsStr() const override {
7509     std::string Str;
7510     llvm::raw_string_ostream OS(Str);
7511     OS << getState();
7512     return OS.str();
7513   }
7514 
7515   /// See AbstractAttribute::updateImpl(...).
7516   ChangeStatus updateImpl(Attributor &A) override {
7517     return indicatePessimisticFixpoint();
7518   }
7519 };
7520 
7521 struct AAPotentialValuesArgument final
7522     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7523                                       PotentialConstantIntValuesState> {
7524   using Base =
7525       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7526                                       PotentialConstantIntValuesState>;
7527   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7528       : Base(IRP, A) {}
7529 
7530   /// See AbstractAttribute::initialize(..).
7531   void initialize(Attributor &A) override {
7532     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7533       indicatePessimisticFixpoint();
7534     } else {
7535       Base::initialize(A);
7536     }
7537   }
7538 
7539   /// See AbstractAttribute::trackStatistics()
7540   void trackStatistics() const override {
7541     STATS_DECLTRACK_ARG_ATTR(potential_values)
7542   }
7543 };
7544 
7545 struct AAPotentialValuesReturned
7546     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7547   using Base =
7548       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7549   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7550       : Base(IRP, A) {}
7551 
7552   /// See AbstractAttribute::trackStatistics()
7553   void trackStatistics() const override {
7554     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7555   }
7556 };
7557 
7558 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7559   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7560       : AAPotentialValuesImpl(IRP, A) {}
7561 
7562   /// See AbstractAttribute::initialize(..).
7563   void initialize(Attributor &A) override {
7564     Value &V = getAssociatedValue();
7565 
7566     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7567       unionAssumed(C->getValue());
7568       indicateOptimisticFixpoint();
7569       return;
7570     }
7571 
7572     if (isa<UndefValue>(&V)) {
7573       unionAssumedWithUndef();
7574       indicateOptimisticFixpoint();
7575       return;
7576     }
7577 
7578     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7579       return;
7580 
7581     if (isa<SelectInst>(V) || isa<PHINode>(V))
7582       return;
7583 
7584     indicatePessimisticFixpoint();
7585 
7586     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7587                       << getAssociatedValue() << "\n");
7588   }
7589 
7590   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7591                                 const APInt &RHS) {
7592     ICmpInst::Predicate Pred = ICI->getPredicate();
7593     switch (Pred) {
7594     case ICmpInst::ICMP_UGT:
7595       return LHS.ugt(RHS);
7596     case ICmpInst::ICMP_SGT:
7597       return LHS.sgt(RHS);
7598     case ICmpInst::ICMP_EQ:
7599       return LHS.eq(RHS);
7600     case ICmpInst::ICMP_UGE:
7601       return LHS.uge(RHS);
7602     case ICmpInst::ICMP_SGE:
7603       return LHS.sge(RHS);
7604     case ICmpInst::ICMP_ULT:
7605       return LHS.ult(RHS);
7606     case ICmpInst::ICMP_SLT:
7607       return LHS.slt(RHS);
7608     case ICmpInst::ICMP_NE:
7609       return LHS.ne(RHS);
7610     case ICmpInst::ICMP_ULE:
7611       return LHS.ule(RHS);
7612     case ICmpInst::ICMP_SLE:
7613       return LHS.sle(RHS);
7614     default:
7615       llvm_unreachable("Invalid ICmp predicate!");
7616     }
7617   }
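  // Note (illustrative, not part of the original source): the signed and
  // unsigned predicate pairs above can disagree on the same bit pattern.
  // For i8, APInt(8, 200).ugt(APInt(8, 100)) is true, but
  // APInt(8, 200).sgt(APInt(8, 100)) is false, since 200 reinterpreted as a
  // signed i8 is -56.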
7618 
7619   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7620                                  uint32_t ResultBitWidth) {
7621     Instruction::CastOps CastOp = CI->getOpcode();
7622     switch (CastOp) {
7623     default:
7624       llvm_unreachable("unsupported or not integer cast");
7625     case Instruction::Trunc:
7626       return Src.trunc(ResultBitWidth);
7627     case Instruction::SExt:
7628       return Src.sext(ResultBitWidth);
7629     case Instruction::ZExt:
7630       return Src.zext(ResultBitWidth);
7631     case Instruction::BitCast:
7632       return Src;
7633     }
7634   }
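  // Note (illustrative, not part of the original source): these APInt helpers
  // implement the usual integer cast semantics, e.g.
  // APInt(16, 0xFF80).trunc(8) yields 0x80, APInt(8, 0x80).sext(16) yields
  // 0xFF80 (sign bit replicated), and APInt(8, 0x80).zext(16) yields 0x0080.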
7635 
7636   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7637                                        const APInt &LHS, const APInt &RHS,
7638                                        bool &SkipOperation, bool &Unsupported) {
7639     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7640     // Unsupported is set to true when the binary operator is not supported.
7641     // SkipOperation is set to true when UB occurs with the given operand pair
7642     // (LHS, RHS).
7643     // TODO: we should look at nsw and nuw keywords to handle operations
7644     //       that create poison or undef value.
7645     switch (BinOpcode) {
7646     default:
7647       Unsupported = true;
7648       return LHS;
7649     case Instruction::Add:
7650       return LHS + RHS;
7651     case Instruction::Sub:
7652       return LHS - RHS;
7653     case Instruction::Mul:
7654       return LHS * RHS;
7655     case Instruction::UDiv:
7656       if (RHS.isNullValue()) {
7657         SkipOperation = true;
7658         return LHS;
7659       }
7660       return LHS.udiv(RHS);
7661     case Instruction::SDiv:
7662       if (RHS.isNullValue()) {
7663         SkipOperation = true;
7664         return LHS;
7665       }
7666       return LHS.sdiv(RHS);
7667     case Instruction::URem:
7668       if (RHS.isNullValue()) {
7669         SkipOperation = true;
7670         return LHS;
7671       }
7672       return LHS.urem(RHS);
7673     case Instruction::SRem:
7674       if (RHS.isNullValue()) {
7675         SkipOperation = true;
7676         return LHS;
7677       }
7678       return LHS.srem(RHS);
7679     case Instruction::Shl:
7680       return LHS.shl(RHS);
7681     case Instruction::LShr:
7682       return LHS.lshr(RHS);
7683     case Instruction::AShr:
7684       return LHS.ashr(RHS);
7685     case Instruction::And:
7686       return LHS & RHS;
7687     case Instruction::Or:
7688       return LHS | RHS;
7689     case Instruction::Xor:
7690       return LHS ^ RHS;
7691     }
7692   }
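  // Illustrative usage of the helper above (UDivOp is a hypothetical
  // BinaryOperator* for a udiv): a zero divisor does not invalidate the whole
  // state; it merely sets SkipOperation so this (LHS, RHS) pair is dropped:
  //   bool Skip = false, Unsup = false;
  //   APInt R = calculateBinaryOperator(UDivOp, APInt(8, 4), APInt(8, 0),
  //                                     Skip, Unsup); // Skip == true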
7693 
7694   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7695                                            const APInt &LHS, const APInt &RHS) {
7696     bool SkipOperation = false;
7697     bool Unsupported = false;
7698     APInt Result =
7699         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7700     if (Unsupported)
7701       return false;
7702     // If SkipOperation is true, we can ignore this operand pair (L, R).
7703     if (!SkipOperation)
7704       unionAssumed(Result);
7705     return isValidState();
7706   }
7707 
7708   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7709     auto AssumedBefore = getAssumed();
7710     Value *LHS = ICI->getOperand(0);
7711     Value *RHS = ICI->getOperand(1);
7712     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7713       return indicatePessimisticFixpoint();
7714 
7715     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7716                                                 DepClassTy::REQUIRED);
7717     if (!LHSAA.isValidState())
7718       return indicatePessimisticFixpoint();
7719 
7720     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7721                                                 DepClassTy::REQUIRED);
7722     if (!RHSAA.isValidState())
7723       return indicatePessimisticFixpoint();
7724 
7725     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7726     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7727 
7728     // TODO: make use of undef flag to limit potential values aggressively.
7729     bool MaybeTrue = false, MaybeFalse = false;
7730     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7731     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7732       // The result of any comparison between undefs can be soundly replaced
7733       // with undef.
7734       unionAssumedWithUndef();
7735     } else if (LHSAA.undefIsContained()) {
7736       // Deliberately no local MaybeTrue/MaybeFalse here; redeclaring them
7737       // would shadow the outer flags and drop this branch's results.
7737       for (const APInt &R : RHSAAPVS) {
7738         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7739         MaybeTrue |= CmpResult;
7740         MaybeFalse |= !CmpResult;
7741         if (MaybeTrue & MaybeFalse)
7742           return indicatePessimisticFixpoint();
7743       }
7744     } else if (RHSAA.undefIsContained()) {
7745       for (const APInt &L : LHSAAPVS) {
7746         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7747         MaybeTrue |= CmpResult;
7748         MaybeFalse |= !CmpResult;
7749         if (MaybeTrue & MaybeFalse)
7750           return indicatePessimisticFixpoint();
7751       }
7752     } else {
7753       for (const APInt &L : LHSAAPVS) {
7754         for (const APInt &R : RHSAAPVS) {
7755           bool CmpResult = calculateICmpInst(ICI, L, R);
7756           MaybeTrue |= CmpResult;
7757           MaybeFalse |= !CmpResult;
7758           if (MaybeTrue & MaybeFalse)
7759             return indicatePessimisticFixpoint();
7760         }
7761       }
7762     }
7763     if (MaybeTrue)
7764       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7765     if (MaybeFalse)
7766       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7767     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7768                                          : ChangeStatus::CHANGED;
7769   }
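  // Worked example (illustrative, not part of the original source): with
  // assumed sets {0, 1} for the LHS and {2} for the RHS of `icmp ult`, every
  // pair compares true, so only the 1-bit value 1 is unioned in and the
  // assumed set becomes {1}. As soon as both outcomes are possible, the state
  // is fixed pessimistically instead of tracking the uninformative set {0, 1}.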
7770 
7771   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7772     auto AssumedBefore = getAssumed();
7773     Value *LHS = SI->getTrueValue();
7774     Value *RHS = SI->getFalseValue();
7775     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7776       return indicatePessimisticFixpoint();
7777 
7778     // TODO: Use assumed simplified condition value
7779     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7780                                                 DepClassTy::REQUIRED);
7781     if (!LHSAA.isValidState())
7782       return indicatePessimisticFixpoint();
7783 
7784     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7785                                                 DepClassTy::REQUIRED);
7786     if (!RHSAA.isValidState())
7787       return indicatePessimisticFixpoint();
7788 
7789     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7790       // select i1 *, undef, undef => undef
7791       unionAssumedWithUndef();
7792     else {
7793       unionAssumed(LHSAA);
7794       unionAssumed(RHSAA);
7795     }
7796     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7797                                          : ChangeStatus::CHANGED;
7798   }
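  // Worked example (illustrative, not part of the original source): for
  // `select i1 %c, i32 %x, i32 %y` with assumed sets {1, 2} for %x and {3}
  // for %y, the condition is not (yet) consulted, so the result's assumed set
  // is the union {1, 2, 3}.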
7799 
7800   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7801     auto AssumedBefore = getAssumed();
7802     if (!CI->isIntegerCast())
7803       return indicatePessimisticFixpoint();
7804     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7805     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7806     Value *Src = CI->getOperand(0);
7807     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7808                                                 DepClassTy::REQUIRED);
7809     if (!SrcAA.isValidState())
7810       return indicatePessimisticFixpoint();
7811     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7812     if (SrcAA.undefIsContained())
7813       unionAssumedWithUndef();
7814     else {
7815       for (const APInt &S : SrcAAPVS) {
7816         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7817         unionAssumed(T);
7818       }
7819     }
7820     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7821                                          : ChangeStatus::CHANGED;
7822   }
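  // Worked example (illustrative, not part of the original source): each
  // member of the source set is mapped through the cast, e.g. truncating the
  // assumed i32 set {256, 257} to i8 yields the set {0, 1}.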
7823 
7824   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7825     auto AssumedBefore = getAssumed();
7826     Value *LHS = BinOp->getOperand(0);
7827     Value *RHS = BinOp->getOperand(1);
7828     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7829       return indicatePessimisticFixpoint();
7830 
7831     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7832                                                 DepClassTy::REQUIRED);
7833     if (!LHSAA.isValidState())
7834       return indicatePessimisticFixpoint();
7835 
7836     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7837                                                 DepClassTy::REQUIRED);
7838     if (!RHSAA.isValidState())
7839       return indicatePessimisticFixpoint();
7840 
7841     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7842     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7843     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7844 
7845     // TODO: make use of undef flag to limit potential values aggressively.
7846     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7847       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7848         return indicatePessimisticFixpoint();
7849     } else if (LHSAA.undefIsContained()) {
7850       for (const APInt &R : RHSAAPVS) {
7851         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7852           return indicatePessimisticFixpoint();
7853       }
7854     } else if (RHSAA.undefIsContained()) {
7855       for (const APInt &L : LHSAAPVS) {
7856         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7857           return indicatePessimisticFixpoint();
7858       }
7859     } else {
7860       for (const APInt &L : LHSAAPVS) {
7861         for (const APInt &R : RHSAAPVS) {
7862           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7863             return indicatePessimisticFixpoint();
7864         }
7865       }
7866     }
7867     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7868                                          : ChangeStatus::CHANGED;
7869   }
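  // Worked example (illustrative, not part of the original source): the
  // update above forms the pairwise product of the operand sets, e.g. `add`
  // over LHS {1, 2} and RHS {10} yields {11, 12}. The set size is capped
  // (MaxPotentialValues); a union that would overflow the cap invalidates the
  // state, which the code above turns into a pessimistic fixpoint.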
7870 
7871   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7872     auto AssumedBefore = getAssumed();
7873     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7874       Value *IncomingValue = PHI->getIncomingValue(u);
7875       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7876           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7877       if (!PotentialValuesAA.isValidState())
7878         return indicatePessimisticFixpoint();
7879       if (PotentialValuesAA.undefIsContained())
7880         unionAssumedWithUndef();
7881       else
7882         unionAssumed(PotentialValuesAA.getAssumed());
7883     }
7884     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7885                                          : ChangeStatus::CHANGED;
7886   }
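  // Worked example (illustrative, not part of the original source): a phi
  // simply accumulates its incoming values' sets, e.g.
  // `phi i32 [ 1, %bb0 ], [ 2, %bb1 ]` with singleton incoming sets {1} and
  // {2} ends up with the assumed set {1, 2}.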
7887 
7888   /// See AbstractAttribute::updateImpl(...).
7889   ChangeStatus updateImpl(Attributor &A) override {
7890     Value &V = getAssociatedValue();
7891     Instruction *I = dyn_cast<Instruction>(&V);
7892 
7893     if (auto *ICI = dyn_cast<ICmpInst>(I))
7894       return updateWithICmpInst(A, ICI);
7895 
7896     if (auto *SI = dyn_cast<SelectInst>(I))
7897       return updateWithSelectInst(A, SI);
7898 
7899     if (auto *CI = dyn_cast<CastInst>(I))
7900       return updateWithCastInst(A, CI);
7901 
7902     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7903       return updateWithBinaryOperator(A, BinOp);
7904 
7905     if (auto *PHI = dyn_cast<PHINode>(I))
7906       return updateWithPHINode(A, PHI);
7907 
7908     return indicatePessimisticFixpoint();
7909   }
7910 
7911   /// See AbstractAttribute::trackStatistics()
7912   void trackStatistics() const override {
7913     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7914   }
7915 };
7916 
7917 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7918   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7919       : AAPotentialValuesImpl(IRP, A) {}
7920 
7921   /// See AbstractAttribute::updateImpl(...).
7922   ChangeStatus updateImpl(Attributor &A) override {
7923     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7924                      "not be called");
7925   }
7926 
7927   /// See AbstractAttribute::trackStatistics()
7928   void trackStatistics() const override {
7929     STATS_DECLTRACK_FN_ATTR(potential_values)
7930   }
7931 };
7932 
7933 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7934   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7935       : AAPotentialValuesFunction(IRP, A) {}
7936 
7937   /// See AbstractAttribute::trackStatistics()
7938   void trackStatistics() const override {
7939     STATS_DECLTRACK_CS_ATTR(potential_values)
7940   }
7941 };
7942 
7943 struct AAPotentialValuesCallSiteReturned
7944     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7945   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7946       : AACallSiteReturnedFromReturned<AAPotentialValues,
7947                                        AAPotentialValuesImpl>(IRP, A) {}
7948 
7949   /// See AbstractAttribute::trackStatistics()
7950   void trackStatistics() const override {
7951     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7952   }
7953 };
7954 
7955 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7956   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7957       : AAPotentialValuesFloating(IRP, A) {}
7958 
7959   /// See AbstractAttribute::initialize(..).
7960   void initialize(Attributor &A) override {
7961     Value &V = getAssociatedValue();
7962 
7963     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7964       unionAssumed(C->getValue());
7965       indicateOptimisticFixpoint();
7966       return;
7967     }
7968 
7969     if (isa<UndefValue>(&V)) {
7970       unionAssumedWithUndef();
7971       indicateOptimisticFixpoint();
7972       return;
7973     }
7974   }
7975 
7976   /// See AbstractAttribute::updateImpl(...).
7977   ChangeStatus updateImpl(Attributor &A) override {
7978     Value &V = getAssociatedValue();
7979     auto AssumedBefore = getAssumed();
7980     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7981                                              DepClassTy::REQUIRED);
7982     const auto &S = AA.getAssumed();
7983     unionAssumed(S);
7984     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7985                                          : ChangeStatus::CHANGED;
7986   }
7987 
7988   /// See AbstractAttribute::trackStatistics()
7989   void trackStatistics() const override {
7990     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7991   }
7992 };
7993 
7994 /// ------------------------ NoUndef Attribute ---------------------------------
7995 struct AANoUndefImpl : AANoUndef {
7996   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7997 
7998   /// See AbstractAttribute::initialize(...).
7999   void initialize(Attributor &A) override {
8000     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8001       indicateOptimisticFixpoint();
8002       return;
8003     }
8004     Value &V = getAssociatedValue();
8005     if (isa<UndefValue>(V))
8006       indicatePessimisticFixpoint();
8007     else if (isa<FreezeInst>(V))
8008       indicateOptimisticFixpoint();
8009     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8010              isGuaranteedNotToBeUndefOrPoison(&V))
8011       indicateOptimisticFixpoint();
8012     else
8013       AANoUndef::initialize(A);
8014   }
8015 
8016   /// See followUsesInMBEC
8017   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8018                        AANoUndef::StateType &State) {
8019     const Value *UseV = U->get();
8020     const DominatorTree *DT = nullptr;
8021     AssumptionCache *AC = nullptr;
8022     InformationCache &InfoCache = A.getInfoCache();
8023     if (Function *F = getAnchorScope()) {
8024       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8025       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8026     }
8027     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8028     bool TrackUse = false;
8029     // Track use for instructions which must produce undef or poison bits when
8030     // at least one operand contains such bits.
8031     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8032       TrackUse = true;
8033     return TrackUse;
8034   }
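  // Note (illustrative, not part of the original source): casts and GEPs
  // produce undef or poison whenever an operand carries such bits, so
  // knowledge gathered about their results also applies to the operand. E.g.,
  // if `%q = getelementptr i8, i8* %p, i64 1` is known not to be undef or
  // poison, the same holds for %p; hence uses by such instructions are
  // followed further.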
8035 
8036   /// See AbstractAttribute::getAsStr().
8037   const std::string getAsStr() const override {
8038     return getAssumed() ? "noundef" : "may-undef-or-poison";
8039   }
8040 
8041   ChangeStatus manifest(Attributor &A) override {
8042     // We don't manifest the noundef attribute for dead positions because
8043     // the values associated with dead positions would be replaced with
8044     // undef values.
8045     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8046       return ChangeStatus::UNCHANGED;
8047     // A position whose simplified value does not have any value is
8048     // considered to be dead. We don't manifest noundef in such positions
8049     // for the same reason as above.
8050     auto &ValueSimplifyAA =
8051         A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
8052     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
8053       return ChangeStatus::UNCHANGED;
8054     return AANoUndef::manifest(A);
8055   }
8056 };
8057 
8058 struct AANoUndefFloating : public AANoUndefImpl {
8059   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8060       : AANoUndefImpl(IRP, A) {}
8061 
8062   /// See AbstractAttribute::initialize(...).
8063   void initialize(Attributor &A) override {
8064     AANoUndefImpl::initialize(A);
8065     if (!getState().isAtFixpoint())
8066       if (Instruction *CtxI = getCtxI())
8067         followUsesInMBEC(*this, A, getState(), *CtxI);
8068   }
8069 
8070   /// See AbstractAttribute::updateImpl(...).
8071   ChangeStatus updateImpl(Attributor &A) override {
8072     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8073                             AANoUndef::StateType &T, bool Stripped) -> bool {
8074       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8075                                              DepClassTy::REQUIRED);
8076       if (!Stripped && this == &AA) {
8077         T.indicatePessimisticFixpoint();
8078       } else {
8079         const AANoUndef::StateType &S =
8080             static_cast<const AANoUndef::StateType &>(AA.getState());
8081         T ^= S;
8082       }
8083       return T.isValidState();
8084     };
8085 
8086     StateType T;
8087     if (!genericValueTraversal<AANoUndef, StateType>(
8088             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8089       return indicatePessimisticFixpoint();
8090 
8091     return clampStateAndIndicateChange(getState(), T);
8092   }
8093 
8094   /// See AbstractAttribute::trackStatistics()
8095   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8096 };
8097 
8098 struct AANoUndefReturned final
8099     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8100   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8101       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8102 
8103   /// See AbstractAttribute::trackStatistics()
8104   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8105 };
8106 
8107 struct AANoUndefArgument final
8108     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8109   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8110       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8111 
8112   /// See AbstractAttribute::trackStatistics()
8113   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8114 };
8115 
8116 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8117   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8118       : AANoUndefFloating(IRP, A) {}
8119 
8120   /// See AbstractAttribute::trackStatistics()
8121   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8122 };
8123 
8124 struct AANoUndefCallSiteReturned final
8125     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8126   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8127       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8128 
8129   /// See AbstractAttribute::trackStatistics()
8130   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8131 };
8132 } // namespace
8133 
8134 const char AAReturnedValues::ID = 0;
8135 const char AANoUnwind::ID = 0;
8136 const char AANoSync::ID = 0;
8137 const char AANoFree::ID = 0;
8138 const char AANonNull::ID = 0;
8139 const char AANoRecurse::ID = 0;
8140 const char AAWillReturn::ID = 0;
8141 const char AAUndefinedBehavior::ID = 0;
8142 const char AANoAlias::ID = 0;
8143 const char AAReachability::ID = 0;
8144 const char AANoReturn::ID = 0;
8145 const char AAIsDead::ID = 0;
8146 const char AADereferenceable::ID = 0;
8147 const char AAAlign::ID = 0;
8148 const char AANoCapture::ID = 0;
8149 const char AAValueSimplify::ID = 0;
8150 const char AAHeapToStack::ID = 0;
8151 const char AAPrivatizablePtr::ID = 0;
8152 const char AAMemoryBehavior::ID = 0;
8153 const char AAMemoryLocation::ID = 0;
8154 const char AAValueConstantRange::ID = 0;
8155 const char AAPotentialValues::ID = 0;
8156 const char AANoUndef::ID = 0;
8157 
8158 // Macro magic to create the static generator function for attributes that
8159 // follow the naming scheme.
8160 
8161 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8162   case IRPosition::PK:                                                         \
8163     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8164 
8165 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8166   case IRPosition::PK:                                                         \
8167     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8168     ++NumAAs;                                                                  \
8169     break;
8170 
8171 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8172   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8173     CLASS *AA = nullptr;                                                       \
8174     switch (IRP.getPositionKind()) {                                           \
8175       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8176       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8177       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8178       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8179       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8180       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8181       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8182       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8183     }                                                                          \
8184     return *AA;                                                                \
8185   }
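// Illustrative expansion (not in the original source): with CLASS = AANoUnwind,
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION produces roughly:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... other invalid position kinds elided ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }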
8186 
8187 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8188   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8189     CLASS *AA = nullptr;                                                       \
8190     switch (IRP.getPositionKind()) {                                           \
8191       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8192       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8193       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8194       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8195       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8196       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8197       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8198       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8199     }                                                                          \
8200     return *AA;                                                                \
8201   }
8202 
8203 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8204   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8205     CLASS *AA = nullptr;                                                       \
8206     switch (IRP.getPositionKind()) {                                           \
8207       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8208       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8209       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8210       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8211       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8212       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8213       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8214       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8215     }                                                                          \
8216     return *AA;                                                                \
8217   }
8218 
8219 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8220   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8221     CLASS *AA = nullptr;                                                       \
8222     switch (IRP.getPositionKind()) {                                           \
8223       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8224       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8225       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8226       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8227       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8228       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8229       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8230       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8231     }                                                                          \
8232     return *AA;                                                                \
8233   }
8234 
8235 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8236   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8237     CLASS *AA = nullptr;                                                       \
8238     switch (IRP.getPositionKind()) {                                           \
8239       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8240       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8241       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8242       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8243       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8244       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8245       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8246       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8247     }                                                                          \
8248     return *AA;                                                                \
8249   }
8250 
8251 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8252 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8253 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8254 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8255 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8256 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8257 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8258 
8259 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8260 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8261 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8262 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8263 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8264 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8265 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8266 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8267 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8268 
8269 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8270 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8271 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8272 
8273 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8274 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8275 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8276 
8277 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8278 
8279 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8280 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8281 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8282 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8283 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8284 #undef SWITCH_PK_CREATE
8285 #undef SWITCH_PK_INV
8286