xref: /llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp (revision 9c7c3f94ef5fe178df569eccb35d636a30f89c02)
1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/Constant.h"
32 #include "llvm/IR/ConstantRange.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/Function.h"
38 #include "llvm/IR/GetElementPtrTypeIterator.h"
39 #include "llvm/IR/GlobalAlias.h"
40 #include "llvm/IR/GlobalVariable.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/Operator.h"
47 #include "llvm/IR/PatternMatch.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/KnownBits.h"
57 #include "llvm/Support/SaveAndRestore.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <optional>
62 #include <utility>
63 
64 #define DEBUG_TYPE "basicaa"
65 
66 using namespace llvm;
67 
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70                                           cl::init(true));
71 
72 static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
73                                                    cl::Hidden, cl::init(true));
74 
75 /// SearchLimitReached / SearchTimes shows how often the limit to
76 /// decompose GEPs is reached. It will affect the precision
77 /// of basic alias analysis.
78 STATISTIC(SearchLimitReached, "Number of times the limit to "
79                               "decompose GEPs is reached");
80 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
81 
82 // The max limit of the search depth in DecomposeGEPExpression() and
83 // getUnderlyingObject().
84 static const unsigned MaxLookupSearchDepth = 6;
85 
86 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
87                                FunctionAnalysisManager::Invalidator &Inv) {
88   // We don't care if this analysis itself is preserved; it has no state. But
89   // we need to check that the analyses it depends on have been. Note that we
90   // may be created without handles to some analyses and in that case don't
91   // depend on them.
92   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
93       (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
94     return true;
95 
96   // Otherwise this analysis result remains valid.
97   return false;
98 }
99 
100 //===----------------------------------------------------------------------===//
101 // Useful predicates
102 //===----------------------------------------------------------------------===//
103 
104 /// Returns the size of the object specified by V, or std::nullopt if unknown.
105 static std::optional<TypeSize> getObjectSize(const Value *V,
106                                              const DataLayout &DL,
107                                              const TargetLibraryInfo &TLI,
108                                              bool NullIsValidLoc,
109                                              bool RoundToAlign = false) {
110   uint64_t Size;
111   ObjectSizeOpts Opts;
112   Opts.RoundToAlign = RoundToAlign;
113   Opts.NullIsUnknownSize = NullIsValidLoc;
114   if (getObjectSize(V, Size, DL, &TLI, Opts))
115     return TypeSize::getFixed(Size);
116   return std::nullopt;
117 }
118 
119 /// Returns true if we can prove that the object specified by V is smaller than
120 /// Size.
121 static bool isObjectSmallerThan(const Value *V, TypeSize Size,
122                                 const DataLayout &DL,
123                                 const TargetLibraryInfo &TLI,
124                                 bool NullIsValidLoc) {
125   // Note that the meanings of the "object" are slightly different in the
126   // following contexts:
127   //    c1: llvm::getObjectSize()
128   //    c2: llvm.objectsize() intrinsic
129   //    c3: isObjectSmallerThan()
130   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
131   // refers to the "entire object".
132   //
133   //  Consider this example:
134   //     char *p = (char*)malloc(100)
135   //     char *q = p+80;
136   //
137   //  In the context of c1 and c2, the "object" pointed by q refers to the
138   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
139   //
140   //  However, in the context of c3, the "object" refers to the chunk of memory
141   // being allocated. So, the "object" has 100 bytes, and q points to the middle
142   // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
143   // parameter, before llvm::getObjectSize() is called to get the size of the
144   // entire object, we should:
145   //    - either rewind the pointer q to the base address of the object in
146   //      question (in this case rewind to p), or
147   //    - just give up. It is up to the caller to make sure the pointer points
148   //      to the base address of the object.
149   //
150   // We go for the 2nd option for simplicity.
151   if (!isIdentifiedObject(V))
152     return false;
153 
154   // This function needs to use the aligned object size because we allow
155   // reads to extend a bit past the end given sufficient alignment.
156   std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
157                                                      /*RoundToAlign*/ true);
158 
159   return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
160 }
161 
162 /// Return the minimal extent from \p V to the end of the underlying object,
163 /// assuming the result is used in an aliasing query. E.g., we do use the query
164 /// location size and the fact that null pointers cannot alias here.
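///
/// For example (illustrative values), a pointer marked dereferenceable(16) that
/// is queried with a precise 24-byte location yields an extent of 24 bytes,
/// since a precise location size is assumed to be accessed and thus valid.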
165 static TypeSize getMinimalExtentFrom(const Value &V,
166                                      const LocationSize &LocSize,
167                                      const DataLayout &DL,
168                                      bool NullIsValidLoc) {
169   // If we have dereferenceability information we know a lower bound for the
170   // extent as accesses for a lower offset would be valid. We need to exclude
171   // the "or null" part if null is a valid pointer. We can ignore frees, as an
172   // access after free would be undefined behavior.
173   bool CanBeNull, CanBeFreed;
174   uint64_t DerefBytes =
175     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
176   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
177   // If queried with a precise location size, we assume that location size is
178   // accessed and thus valid.
179   if (LocSize.isPrecise())
180     DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
181   return TypeSize::getFixed(DerefBytes);
182 }
183 
184 /// Returns true if we can prove that the object specified by V has size Size.
185 static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
186                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
187   std::optional<TypeSize> ObjectSize =
188       getObjectSize(V, DL, TLI, NullIsValidLoc);
189   return ObjectSize && *ObjectSize == Size;
190 }
191 
192 /// Return true if both V1 and V2 are VScale.
193 static bool areBothVScale(const Value *V1, const Value *V2) {
194   return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
195          PatternMatch::match(V2, PatternMatch::m_VScale());
196 }
197 
198 //===----------------------------------------------------------------------===//
199 // CaptureInfo implementations
200 //===----------------------------------------------------------------------===//
201 
202 CaptureInfo::~CaptureInfo() = default;
203 
204 bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
205                                             const Instruction *I, bool OrAt) {
206   return isNonEscapingLocalObject(Object, &IsCapturedCache);
207 }
208 
209 static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
210                          const LoopInfo *LI) {
211   BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
212   SmallVector<BasicBlock *> Succs(successors(BB));
213   return Succs.empty() ||
214          !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
215 }
216 
217 bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
218                                              const Instruction *I, bool OrAt) {
219   if (!isIdentifiedFunctionLocal(Object))
220     return false;
221 
222   auto Iter = EarliestEscapes.insert({Object, nullptr});
223   if (Iter.second) {
224     Instruction *EarliestCapture = FindEarliestCapture(
225         Object, *const_cast<Function *>(DT.getRoot()->getParent()),
226         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
227     if (EarliestCapture) {
228       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
229       Ins.first->second.push_back(Object);
230     }
231     Iter.first->second = EarliestCapture;
232   }
233 
234   // No capturing instruction.
235   if (!Iter.first->second)
236     return true;
237 
238   // No context instruction means any use is capturing.
239   if (!I)
240     return false;
241 
242   if (I == Iter.first->second) {
243     if (OrAt)
244       return false;
245     return isNotInCycle(I, &DT, LI);
246   }
247 
248   return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
249 }
250 
251 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
252   auto Iter = Inst2Obj.find(I);
253   if (Iter != Inst2Obj.end()) {
254     for (const Value *Obj : Iter->second)
255       EarliestEscapes.erase(Obj);
256     Inst2Obj.erase(I);
257   }
258 }
259 
260 //===----------------------------------------------------------------------===//
261 // GetElementPtr Instruction Decomposition and Analysis
262 //===----------------------------------------------------------------------===//
263 
264 namespace {
265 /// Represents zext(sext(trunc(V))).
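///
/// For example (illustrative values), an i64 value %v truncated to i16 and then
/// zero-extended to i32 is represented as V = %v, TruncBits = 48, SExtBits = 0,
/// ZExtBits = 16, giving getBitWidth() == 64 - 48 + 0 + 16 == 32.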
266 struct CastedValue {
267   const Value *V;
268   unsigned ZExtBits = 0;
269   unsigned SExtBits = 0;
270   unsigned TruncBits = 0;
271   /// Whether trunc(V) is non-negative.
272   bool IsNonNegative = false;
273 
274   explicit CastedValue(const Value *V) : V(V) {}
275   explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
276                        unsigned TruncBits, bool IsNonNegative)
277       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
278         IsNonNegative(IsNonNegative) {}
279 
280   unsigned getBitWidth() const {
281     return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
282            SExtBits;
283   }
284 
285   CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
286     return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
287                        IsNonNegative && PreserveNonNeg);
288   }
289 
290   /// Replace V with zext(NewV)
291   CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
292     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
293                         NewV->getType()->getPrimitiveSizeInBits();
294     if (ExtendBy <= TruncBits)
295       // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
296       // The nneg can be preserved on the outer zext here.
297       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
298                          IsNonNegative);
299 
300     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
301     ExtendBy -= TruncBits;
302     // zext<nneg>(zext(NewV)) == zext(NewV)
303     // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
304     // The nneg can be preserved from the inner zext here but must be dropped
305     // from the outer.
306     return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
307                        ZExtNonNegative);
308   }
309 
310   /// Replace V with sext(NewV)
311   CastedValue withSExtOfValue(const Value *NewV) const {
312     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
313                         NewV->getType()->getPrimitiveSizeInBits();
314     if (ExtendBy <= TruncBits)
315       // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
316       // The nneg can be preserved on the outer zext here
317       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
318                          IsNonNegative);
319 
320     // zext(sext(sext(NewV)))
321     ExtendBy -= TruncBits;
322     // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
323     // The nneg can be preserved on the outer zext here
324     return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
325   }
326 
327   APInt evaluateWith(APInt N) const {
328     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
329            "Incompatible bit width");
330     if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
331     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
332     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
333     return N;
334   }
335 
336   ConstantRange evaluateWith(ConstantRange N) const {
337     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
338            "Incompatible bit width");
339     if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
340     if (IsNonNegative && !N.isAllNonNegative())
341       N = N.intersectWith(
342           ConstantRange(APInt::getZero(N.getBitWidth()),
343                         APInt::getSignedMinValue(N.getBitWidth())));
344     if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
345     if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
346     return N;
347   }
348 
349   bool canDistributeOver(bool NUW, bool NSW) const {
350     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
351     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
352     // trunc(x op y) == trunc(x) op trunc(y)
353     return (!ZExtBits || NUW) && (!SExtBits || NSW);
354   }
355 
356   bool hasSameCastsAs(const CastedValue &Other) const {
357     if (V->getType() != Other.V->getType())
358       return false;
359 
360     if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
361         TruncBits == Other.TruncBits)
362       return true;
363     // If either CastedValue has an nneg zext then the sext/zext bits are
364     // interchangeable for that value.
365     if (IsNonNegative || Other.IsNonNegative)
366       return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
367               TruncBits == Other.TruncBits);
368     return false;
369   }
370 };
371 
372 /// Represents zext(sext(trunc(V))) * Scale + Offset.
373 struct LinearExpression {
374   CastedValue Val;
375   APInt Scale;
376   APInt Offset;
377 
378   /// True if all operations in this expression are NSW.
379   bool IsNSW;
380 
381   LinearExpression(const CastedValue &Val, const APInt &Scale,
382                    const APInt &Offset, bool IsNSW)
383       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
384 
385   LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
386     unsigned BitWidth = Val.getBitWidth();
387     Scale = APInt(BitWidth, 1);
388     Offset = APInt(BitWidth, 0);
389   }
390 
391   LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
392     // The check for zero offset is necessary, because generally
393     // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
394     bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
395     return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
396   }
397 };
398 }
399 
400 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
401 /// B are constant integers.
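///
/// For example (illustrative values), decomposing "shl (add %x, 4), 1" yields
/// V = %x, A = 2 and B = 8: the inner add contributes an offset of 4, which the
/// shift then scales by 2 along with the scale.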
402 static LinearExpression GetLinearExpression(
403     const CastedValue &Val,  const DataLayout &DL, unsigned Depth,
404     AssumptionCache *AC, DominatorTree *DT) {
405   // Limit our recursion depth.
406   if (Depth == 6)
407     return Val;
408 
409   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
410     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
411                             Val.evaluateWith(Const->getValue()), true);
412 
413   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
414     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
415       APInt RHS = Val.evaluateWith(RHSC->getValue());
416       // The only non-OBO case we deal with is or, and only limited to the
417       // case where it is both nuw and nsw.
418       bool NUW = true, NSW = true;
419       if (isa<OverflowingBinaryOperator>(BOp)) {
420         NUW &= BOp->hasNoUnsignedWrap();
421         NSW &= BOp->hasNoSignedWrap();
422       }
423       if (!Val.canDistributeOver(NUW, NSW))
424         return Val;
425 
426       // While we can distribute over trunc, we cannot preserve nowrap flags
427       // in that case.
428       if (Val.TruncBits)
429         NUW = NSW = false;
430 
431       LinearExpression E(Val);
432       switch (BOp->getOpcode()) {
433       default:
434         // We don't understand this instruction, so we can't decompose it any
435         // further.
436         return Val;
437       case Instruction::Or:
438         // X|C == X+C if it is disjoint.  Otherwise we can't analyze it.
439         if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
440           return Val;
441 
442         [[fallthrough]];
443       case Instruction::Add: {
444         E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
445                                 Depth + 1, AC, DT);
446         E.Offset += RHS;
447         E.IsNSW &= NSW;
448         break;
449       }
450       case Instruction::Sub: {
451         E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
452                                 Depth + 1, AC, DT);
453         E.Offset -= RHS;
454         E.IsNSW &= NSW;
455         break;
456       }
457       case Instruction::Mul:
458         E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
459                                 Depth + 1, AC, DT)
460                 .mul(RHS, NSW);
461         break;
462       case Instruction::Shl:
463         // We're trying to linearize an expression of the kind:
464         //   shl i8 -128, 36
465         // where the shift count exceeds the bitwidth of the type.
466         // We can't decompose this further (the expression would return
467         // a poison value).
468         if (RHS.getLimitedValue() > Val.getBitWidth())
469           return Val;
470 
471         E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
472                                 Depth + 1, AC, DT);
473         E.Offset <<= RHS.getLimitedValue();
474         E.Scale <<= RHS.getLimitedValue();
475         E.IsNSW &= NSW;
476         break;
477       }
478       return E;
479     }
480   }
481 
482   if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
483     return GetLinearExpression(
484         Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
485         Depth + 1, AC, DT);
486 
487   if (isa<SExtInst>(Val.V))
488     return GetLinearExpression(
489         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
490         DL, Depth + 1, AC, DT);
491 
492   return Val;
493 }
494 
495 /// Ensure that a pointer offset fits in an integer of size IndexSize
496 /// (in bits) when that size is smaller than the maximum index size. This is
497 /// an issue in particular for 32b pointers with negative indices that rely on
498 /// two's complement wrap-arounds for precise alias information where the
499 /// maximum index size is 64b.
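///
/// For example (illustrative values), sign-extending a 64-bit Offset of
/// 0x00000000FFFFFFFF down to a 32-bit index size yields -1: the offset is
/// shifted left by 32 bits and then arithmetically shifted back.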
500 static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
501   assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
502   unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
503   if (ShiftBits != 0) {
504     Offset <<= ShiftBits;
505     Offset.ashrInPlace(ShiftBits);
506   }
507 }
508 
509 namespace {
510 // A linear transformation of a Value; this class represents
511 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
512 struct VariableGEPIndex {
513   CastedValue Val;
514   APInt Scale;
515 
516   // Context instruction to use when querying information about this index.
517   const Instruction *CxtI;
518 
519   /// True if all operations in this expression are NSW.
520   bool IsNSW;
521 
522   /// True if the index should be subtracted rather than added. We don't simply
523   /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
524   /// non-wrapping, while X + INT_MIN*(-1) wraps.
525   bool IsNegated;
526 
527   bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
528     if (IsNegated == Other.IsNegated)
529       return Scale == -Other.Scale;
530     return Scale == Other.Scale;
531   }
532 
533   void dump() const {
534     print(dbgs());
535     dbgs() << "\n";
536   }
537   void print(raw_ostream &OS) const {
538     OS << "(V=" << Val.V->getName()
539        << ", zextbits=" << Val.ZExtBits
540        << ", sextbits=" << Val.SExtBits
541        << ", truncbits=" << Val.TruncBits
542        << ", scale=" << Scale
543        << ", nsw=" << IsNSW
544        << ", negated=" << IsNegated << ")";
545   }
546 };
547 }
548 
549 // Represents the internal structure of a GEP, decomposed into a base pointer,
550 // constant offsets, and variable scaled indices.
551 struct BasicAAResult::DecomposedGEP {
552   // Base pointer of the GEP
553   const Value *Base;
554   // Total constant offset from base.
555   APInt Offset;
556   // Scaled variable (non-constant) indices.
557   SmallVector<VariableGEPIndex, 4> VarIndices;
558   // Are all operations inbounds GEPs or non-indexing operations?
559   // (std::nullopt iff expression doesn't involve any geps)
560   std::optional<bool> InBounds;
561 
562   void dump() const {
563     print(dbgs());
564     dbgs() << "\n";
565   }
566   void print(raw_ostream &OS) const {
567     OS << "(DecomposedGEP Base=" << Base->getName()
568        << ", Offset=" << Offset
569        << ", VarIndices=[";
570     for (size_t i = 0; i < VarIndices.size(); i++) {
571       if (i != 0)
572         OS << ", ";
573       VarIndices[i].print(OS);
574     }
575     OS << "])";
576   }
577 };
578 
579 
580 /// If V is a symbolic pointer expression, decompose it into a base pointer
581 /// with a constant offset and a number of scaled symbolic offsets.
582 ///
583 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
584 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
585 /// specified amount, but which may have other unrepresented high bits. As
586 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
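///
/// For example (illustrative values), "getelementptr inbounds [10 x i32],
/// ptr %base, i64 0, i64 %i" decomposes into Base = %base, Offset = 0 and a
/// single variable index {%i, Scale = 4}.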
587 BasicAAResult::DecomposedGEP
588 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
589                                       AssumptionCache *AC, DominatorTree *DT) {
590   // Limit recursion depth to limit compile time in crazy cases.
591   unsigned MaxLookup = MaxLookupSearchDepth;
592   SearchTimes++;
593   const Instruction *CxtI = dyn_cast<Instruction>(V);
594 
595   unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
596   DecomposedGEP Decomposed;
597   Decomposed.Offset = APInt(MaxIndexSize, 0);
598   do {
599     // See if this is a bitcast or GEP.
600     const Operator *Op = dyn_cast<Operator>(V);
601     if (!Op) {
602       // The only non-operator cases we can handle are GlobalAliases.
603       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
604         if (!GA->isInterposable()) {
605           V = GA->getAliasee();
606           continue;
607         }
608       }
609       Decomposed.Base = V;
610       return Decomposed;
611     }
612 
613     if (Op->getOpcode() == Instruction::BitCast ||
614         Op->getOpcode() == Instruction::AddrSpaceCast) {
615       V = Op->getOperand(0);
616       continue;
617     }
618 
619     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
620     if (!GEPOp) {
621       if (const auto *PHI = dyn_cast<PHINode>(V)) {
622         // Look through single-arg phi nodes created by LCSSA.
623         if (PHI->getNumIncomingValues() == 1) {
624           V = PHI->getIncomingValue(0);
625           continue;
626         }
627       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
628         // CaptureTracking can know about special capturing properties of some
629         // intrinsics like launder.invariant.group that can't be expressed with
630         // attributes, but that have properties like returning an aliasing
631         // pointer. Because some analyses may assume that a nocapture pointer is
632         // not returned from such a special intrinsic (the function would have to
633         // be marked with the returned attribute), it is crucial to use this
634         // function so that it stays in sync with CaptureTracking. Not using it
635         // may cause weird miscompilations where two aliasing pointers are
636         // assumed not to alias.
637         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
638           V = RP;
639           continue;
640         }
641       }
642 
643       Decomposed.Base = V;
644       return Decomposed;
645     }
646 
647     // Track whether we've seen at least one in bounds gep, and if so, whether
648     // all geps parsed were in bounds.
649     if (Decomposed.InBounds == std::nullopt)
650       Decomposed.InBounds = GEPOp->isInBounds();
651     else if (!GEPOp->isInBounds())
652       Decomposed.InBounds = false;
653 
654     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
655 
656     unsigned AS = GEPOp->getPointerAddressSpace();
657     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
658     gep_type_iterator GTI = gep_type_begin(GEPOp);
659     unsigned IndexSize = DL.getIndexSizeInBits(AS);
660     // Assume all GEP operands are constants until proven otherwise.
661     bool GepHasConstantOffset = true;
662     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
663          I != E; ++I, ++GTI) {
664       const Value *Index = *I;
665       // Compute the (potentially symbolic) offset in bytes for this index.
666       if (StructType *STy = GTI.getStructTypeOrNull()) {
667         // For a struct, add the member offset.
668         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
669         if (FieldNo == 0)
670           continue;
671 
672         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
673         continue;
674       }
675 
676       // For an array/pointer, add the element offset, explicitly scaled.
677       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
678         if (CIdx->isZero())
679           continue;
680 
681         // Don't attempt to analyze GEPs if the scalable index is not zero.
682         TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
683         if (AllocTypeSize.isScalable()) {
684           Decomposed.Base = V;
685           return Decomposed;
686         }
687 
688         Decomposed.Offset += AllocTypeSize.getFixedValue() *
689                              CIdx->getValue().sextOrTrunc(MaxIndexSize);
690         continue;
691       }
692 
693       TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
694       if (AllocTypeSize.isScalable()) {
695         Decomposed.Base = V;
696         return Decomposed;
697       }
698 
699       GepHasConstantOffset = false;
700 
701       // If the integer type is smaller than the index size, it is implicitly
702       // sign extended or truncated to index size.
703       bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
704       bool NonNeg = NUSW && GEPOp->hasNoUnsignedWrap();
705       unsigned Width = Index->getType()->getIntegerBitWidth();
706       unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
707       unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
708       LinearExpression LE = GetLinearExpression(
709           CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);
710 
711       // Scale by the type size.
712       unsigned TypeSize = AllocTypeSize.getFixedValue();
713       LE = LE.mul(APInt(IndexSize, TypeSize), NUSW);
714       Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
715       APInt Scale = LE.Scale.sext(MaxIndexSize);
716 
717       // If we already had an occurrence of this index variable, merge this
718       // scale into it.  For example, we want to handle:
719       //   A[x][x] -> x*16 + x*4 -> x*20
720       // This also ensures that 'x' only appears in the index list once.
721       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
722         if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
723              areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
724             Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
725           Scale += Decomposed.VarIndices[i].Scale;
726           LE.IsNSW = false; // We cannot guarantee nsw for the merge.
727           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
728           break;
729         }
730       }
731 
732       // Make sure that we have a scale that makes sense for this target's
733       // index size.
734       adjustToIndexSize(Scale, IndexSize);
735 
736       if (!!Scale) {
737         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
738                                   /* IsNegated */ false};
739         Decomposed.VarIndices.push_back(Entry);
740       }
741     }
742 
743     // Take care of wrap-arounds
744     if (GepHasConstantOffset)
745       adjustToIndexSize(Decomposed.Offset, IndexSize);
746 
747     // Analyze the base pointer next.
748     V = GEPOp->getOperand(0);
749   } while (--MaxLookup);
750 
751   // If the chain of expressions is too deep, just return early.
752   Decomposed.Base = V;
753   SearchLimitReached++;
754   return Decomposed;
755 }
756 
757 ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
758                                             AAQueryInfo &AAQI,
759                                             bool IgnoreLocals) {
760   assert(Visited.empty() && "Visited must be cleared after use!");
761   auto _ = make_scope_exit([&] { Visited.clear(); });
762 
763   unsigned MaxLookup = 8;
764   SmallVector<const Value *, 16> Worklist;
765   Worklist.push_back(Loc.Ptr);
766   ModRefInfo Result = ModRefInfo::NoModRef;
767 
768   do {
769     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
770     if (!Visited.insert(V).second)
771       continue;
772 
773     // Ignore allocas if we were instructed to do so.
774     if (IgnoreLocals && isa<AllocaInst>(V))
775       continue;
776 
777     // If the location points to memory that is known to be invariant for
778     // the life of the underlying SSA value, then we can exclude Mod from
779     // the set of valid memory effects.
780     //
781     // An argument that is marked readonly and noalias is known to be
782     // invariant while that function is executing.
783     if (const Argument *Arg = dyn_cast<Argument>(V)) {
784       if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
785         Result |= ModRefInfo::Ref;
786         continue;
787       }
788     }
789 
790     // A global constant can't be mutated.
791     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
792       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
793       // global to be marked constant in some modules and non-constant in
794       // others.  GV may even be a declaration, not a definition.
795       if (!GV->isConstant())
796         return ModRefInfo::ModRef;
797       continue;
798     }
799 
800     // If both select values point to local memory, then so does the select.
801     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
802       Worklist.push_back(SI->getTrueValue());
803       Worklist.push_back(SI->getFalseValue());
804       continue;
805     }
806 
807     // If all values incoming to a phi node point to local memory, then so does
808     // the phi.
809     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
810       // Don't bother inspecting phi nodes with many operands.
811       if (PN->getNumIncomingValues() > MaxLookup)
812         return ModRefInfo::ModRef;
813       append_range(Worklist, PN->incoming_values());
814       continue;
815     }
816 
817     // Otherwise be conservative.
818     return ModRefInfo::ModRef;
819   } while (!Worklist.empty() && --MaxLookup);
820 
821   // If we hit the maximum number of instructions to examine, be conservative.
822   if (!Worklist.empty())
823     return ModRefInfo::ModRef;
824 
825   return Result;
826 }
827 
828 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
829   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
830   return II && II->getIntrinsicID() == IID;
831 }
832 
833 /// Returns the behavior when calling the given call site.
834 MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
835                                               AAQueryInfo &AAQI) {
836   MemoryEffects Min = Call->getAttributes().getMemoryEffects();
837 
838   if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
839     MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
840     // Operand bundles on the call may also read or write memory, in addition
841     // to the behavior of the called function.
842     if (Call->hasReadingOperandBundles())
843       FuncME |= MemoryEffects::readOnly();
844     if (Call->hasClobberingOperandBundles())
845       FuncME |= MemoryEffects::writeOnly();
846     Min &= FuncME;
847   }
848 
849   return Min;
850 }
851 
852 /// Returns the behavior when calling the given function. For use when the call
853 /// site is not known.
854 MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
855   switch (F->getIntrinsicID()) {
856   case Intrinsic::experimental_guard:
857   case Intrinsic::experimental_deoptimize:
858     // These intrinsics can read arbitrary memory, and additionally modref
859     // inaccessible memory to model control dependence.
860     return MemoryEffects::readOnly() |
861            MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
862   }
863 
864   return F->getMemoryEffects();
865 }
866 
867 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
868                                            unsigned ArgIdx) {
869   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
870     return ModRefInfo::Mod;
871 
872   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
873     return ModRefInfo::Ref;
874 
875   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
876     return ModRefInfo::NoModRef;
877 
878   return ModRefInfo::ModRef;
879 }
880 
881 #ifndef NDEBUG
882 static const Function *getParent(const Value *V) {
883   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
884     if (!inst->getParent())
885       return nullptr;
886     return inst->getParent()->getParent();
887   }
888 
889   if (const Argument *arg = dyn_cast<Argument>(V))
890     return arg->getParent();
891 
892   return nullptr;
893 }
894 
895 static bool notDifferentParent(const Value *O1, const Value *O2) {
896 
897   const Function *F1 = getParent(O1);
898   const Function *F2 = getParent(O2);
899 
900   return !F1 || !F2 || F1 == F2;
901 }
902 #endif
903 
904 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
905                                  const MemoryLocation &LocB, AAQueryInfo &AAQI,
906                                  const Instruction *CtxI) {
907   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
908          "BasicAliasAnalysis doesn't support interprocedural queries.");
909   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
910 }
911 
912 /// Checks to see if the specified callsite can clobber the specified memory
913 /// object.
914 ///
915 /// Since we only look at local properties of this function, we really can't
916 /// say much about this query.  We do, however, use simple "address taken"
917 /// analysis on local objects.
918 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
919                                         const MemoryLocation &Loc,
920                                         AAQueryInfo &AAQI) {
921   assert(notDifferentParent(Call, Loc.Ptr) &&
922          "AliasAnalysis query involving multiple functions!");
923 
924   const Value *Object = getUnderlyingObject(Loc.Ptr);
925 
926   // Calls marked 'tail' cannot read or write allocas from the current frame
927   // because the current frame might be destroyed by the time they run. However,
928   // a tail call may use an alloca with byval. Calling with byval copies the
929   // contents of the alloca into argument registers or stack slots, so there is
930   // no lifetime issue.
931   if (isa<AllocaInst>(Object))
932     if (const CallInst *CI = dyn_cast<CallInst>(Call))
933       if (CI->isTailCall() &&
934           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
935         return ModRefInfo::NoModRef;
936 
937   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
938   // modify them even though the alloca is not escaped.
939   if (auto *AI = dyn_cast<AllocaInst>(Object))
940     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
941       return ModRefInfo::Mod;
942 
943   // A call can access a locally allocated object either because it is passed as
944   // an argument to the call, or because it has escaped prior to the call.
945   //
946   // Make sure the object has not escaped here, and then check that none of the
947   // call arguments alias the object below.
948   if (!isa<Constant>(Object) && Call != Object &&
949       AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {
950 
951     // Optimistically assume that call doesn't touch Object and check this
952     // assumption in the following loop.
953     ModRefInfo Result = ModRefInfo::NoModRef;
954 
955     unsigned OperandNo = 0;
956     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
957          CI != CE; ++CI, ++OperandNo) {
958       if (!(*CI)->getType()->isPointerTy())
959         continue;
960 
961       // Call doesn't access memory through this operand, so we don't care
962       // if it aliases with Object.
963       if (Call->doesNotAccessMemory(OperandNo))
964         continue;
965 
966       // If this is a no-capture pointer argument, see if we can tell that it
967       // is impossible to alias the pointer we're checking.
968       AliasResult AR =
969           AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
970                          MemoryLocation::getBeforeOrAfter(Object), AAQI);
971       // Operand doesn't alias 'Object'; continue looking for other aliases.
972       if (AR == AliasResult::NoAlias)
973         continue;
974       // Operand aliases 'Object', but the call doesn't modify it. Strengthen
975       // the initial assumption and keep looking in case there are more aliases.
976       if (Call->onlyReadsMemory(OperandNo)) {
977         Result |= ModRefInfo::Ref;
978         continue;
979       }
980       // Operand aliases 'Object' but call only writes into it.
981       if (Call->onlyWritesMemory(OperandNo)) {
982         Result |= ModRefInfo::Mod;
983         continue;
984       }
985       // This operand aliases 'Object' and the call reads and writes into it.
986       // Setting ModRef will not yield an early return below; MustAlias is not
987       // used further.
988       Result = ModRefInfo::ModRef;
989       break;
990     }
991 
992     // Early return if we improved mod ref information
993     if (!isModAndRefSet(Result))
994       return Result;
995   }
996 
997   // If the call is malloc/calloc like, we can assume that it doesn't
998   // modify any IR visible value.  This is only valid because we assume these
999   // routines do not read values visible in the IR.  TODO: Consider special
1000   // casing realloc and strdup routines which access only their arguments as
1001   // well.  Or alternatively, replace all of this with inaccessiblememonly once
1002   // that's implemented fully.
1003   if (isMallocOrCallocLikeFn(Call, &TLI)) {
1004     // Be conservative if the accessed pointer may alias the allocation -
1005     // fallback to the generic handling below.
1006     if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
1007         AliasResult::NoAlias)
1008       return ModRefInfo::NoModRef;
1009   }
1010 
1011   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1012   // writing so that proper control dependencies are maintained but they never
1013   // mod any particular memory location visible to the IR.
1014   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1015   // intrinsic is now modeled as reading memory. This prevents hoisting the
1016   // invariant.start intrinsic over stores. Consider:
1017   // *ptr = 40;
1018   // *ptr = 50;
1019   // invariant_start(ptr)
1020   // int val = *ptr;
1021   // print(val);
1022   //
1023   // This cannot be transformed to:
1024   //
1025   // *ptr = 40;
1026   // invariant_start(ptr)
1027   // *ptr = 50;
1028   // int val = *ptr;
1029   // print(val);
1030   //
1031   // The transformation will cause the second store to be ignored (based on
1032   // rules of invariant.start)  and print 40, while the first program always
1033   // prints 50.
1034   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1035     return ModRefInfo::Ref;
1036 
1037   // Be conservative.
1038   return ModRefInfo::ModRef;
1039 }
1040 
1041 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1042                                         const CallBase *Call2,
1043                                         AAQueryInfo &AAQI) {
1044   // Guard intrinsics are marked as arbitrarily writing so that proper control
1045   // dependencies are maintained but they never mod any particular memory
1046   // location.
1047   //
1048   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1049   // heap state at the point the guard is issued needs to be consistent in case
1050   // the guard invokes the "deopt" continuation.
1051 
1052   // NB! This function is *not* commutative, so we special case two
1053   // possibilities for guard intrinsics.
1054 
1055   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1056     return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
1057                ? ModRefInfo::Ref
1058                : ModRefInfo::NoModRef;
1059 
1060   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1061     return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
1062                ? ModRefInfo::Mod
1063                : ModRefInfo::NoModRef;
1064 
1065   // Be conservative.
1066   return ModRefInfo::ModRef;
1067 }
1068 
1069 /// Return true if we know V to be the base address of the corresponding memory
1070 /// object.  This implies that any address less than V must be out of bounds
1071 /// for the underlying object.  Note that just being isIdentifiedObject() is
1072 /// not enough - for example, a negative offset from a noalias argument or call
1073 /// can be inbounds w.r.t. the actual underlying object.
1074 static bool isBaseOfObject(const Value *V) {
1075   // TODO: We can handle other cases here
1076   // 1) For GC languages, arguments to functions are often required to be
1077   //    base pointers.
1078   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1079   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1080 }
1081 
1082 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1083 /// another pointer.
1084 ///
1085 /// We know that V1 is a GEP, but we don't know anything about V2.
1086 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1087 /// V2.
1088 AliasResult BasicAAResult::aliasGEP(
1089     const GEPOperator *GEP1, LocationSize V1Size,
1090     const Value *V2, LocationSize V2Size,
1091     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1092   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1093     // TODO: This limitation exists for compile-time reasons. Relax it if we
1094     // can avoid exponential pathological cases.
1095     if (!isa<GEPOperator>(V2))
1096       return AliasResult::MayAlias;
1097 
1098     // If both accesses have unknown size, we can only check whether the base
1099     // objects don't alias.
1100     AliasResult BaseAlias =
1101         AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1102                        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1103     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1104                                              : AliasResult::MayAlias;
1105   }
1106 
1107   DominatorTree *DT = getDT(AAQI);
1108   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1109   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1110 
1111   // Bail if we were not able to decompose anything.
1112   if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1113     return AliasResult::MayAlias;
1114 
1115   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1116   // symbolic difference.
1117   subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1118 
1119   // If an inbounds GEP would have to start from an out of bounds address
1120   // for the two to alias, then we can assume noalias.
1121   // TODO: Remove !isScalable() once BasicAA fully support scalable location
1122   // size
1123   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1124       V2Size.hasValue() && !V2Size.isScalable() &&
1125       DecompGEP1.Offset.sge(V2Size.getValue()) &&
1126       isBaseOfObject(DecompGEP2.Base))
1127     return AliasResult::NoAlias;
1128 
1129   if (isa<GEPOperator>(V2)) {
1130     // Symmetric case to above.
1131     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1132         V1Size.hasValue() && !V1Size.isScalable() &&
1133         DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1134         isBaseOfObject(DecompGEP1.Base))
1135       return AliasResult::NoAlias;
1136   }
1137 
1138   // For GEPs with identical offsets, we can preserve the size and AAInfo
1139   // when performing the alias check on the underlying objects.
1140   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1141     return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
1142                           MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
1143 
1144   // Do the base pointers alias?
1145   AliasResult BaseAlias =
1146       AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1147                      MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1148 
1149   // If we get a No or May, then return it immediately; no amount of analysis
1150   // will improve this situation.
1151   if (BaseAlias != AliasResult::MustAlias) {
1152     assert(BaseAlias == AliasResult::NoAlias ||
1153            BaseAlias == AliasResult::MayAlias);
1154     return BaseAlias;
1155   }
1156 
1157   // If there is a constant difference between the pointers, but the difference
1158   // is less than the size of the associated memory object, then we know
1159   // that the objects are partially overlapping.  If the difference is
1160   // greater, we know they do not overlap.
1161   if (DecompGEP1.VarIndices.empty()) {
1162     APInt &Off = DecompGEP1.Offset;
1163 
1164     // Initialize for Off >= 0 (V2 <= GEP1) case.
1165     LocationSize VLeftSize = V2Size;
1166     LocationSize VRightSize = V1Size;
1167     const bool Swapped = Off.isNegative();
1168 
1169     if (Swapped) {
1170       // Swap if we have the situation where:
1171       // +                +
1172       // | BaseOffset     |
1173       // ---------------->|
1174       // |-->V1Size       |-------> V2Size
1175       // GEP1             V2
1176       std::swap(VLeftSize, VRightSize);
1177       Off = -Off;
1178     }
1179 
1180     if (!VLeftSize.hasValue())
1181       return AliasResult::MayAlias;
1182 
1183     const TypeSize LSize = VLeftSize.getValue();
1184     if (!LSize.isScalable()) {
1185       if (Off.ult(LSize)) {
1186         // Conservatively drop processing if a phi was visited and/or offset is
1187         // too big.
1188         AliasResult AR = AliasResult::PartialAlias;
1189         if (VRightSize.hasValue() && !VRightSize.isScalable() &&
1190             Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
1191           // Memory referenced by the right pointer is nested. Save the offset in
1192           // the cache. Note that the offset was originally estimated as GEP1-V2,
1193           // but AliasResult contains the shift that represents GEP1+Offset=V2.
1194           AR.setOffset(-Off.getSExtValue());
1195           AR.swap(Swapped);
1196         }
1197         return AR;
1198       }
1199       return AliasResult::NoAlias;
1200     } else {
1201       // We can use getVScaleRange to prove that Off >= (CR.upper * LSize).
1202       ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
1203       bool Overflow;
1204       APInt UpperRange = CR.getUnsignedMax().umul_ov(
1205           APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
1206       if (!Overflow && Off.uge(UpperRange))
1207         return AliasResult::NoAlias;
1208     }
1209   }
1210 
1211   // VScale Alias Analysis - Given one scalable offset between accesses and a
1212   // scalable typesize, we can divide each side by vscale, treating both values
1213   // as constants. We prove that Offset/vscale >= TypeSize/vscale.
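  //
  // For example (illustrative values), accesses at %p and %p + 32 * vscale,
  // each of size "vscale x 16" bytes, cannot alias provided 32 * vscale is
  // known not to overflow: the dependency distance per vscale (32) is at least
  // as large as the typesize per vscale (16).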
1214   if (DecompGEP1.VarIndices.size() == 1 &&
1215       DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
1216       DecompGEP1.Offset.isZero() &&
1217       PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
1218                           PatternMatch::m_VScale())) {
1219     const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
1220     APInt Scale =
1221         ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
1222     LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;
1223 
1224     // Check if the offset is known not to overflow; if it may, then attempt to
1225     // prove that it does not using the known values of vscale_range.
1226     bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
1227     if (Overflows) {
1228       ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
1229       (void)CR.getSignedMax().smul_ov(Scale, Overflows);
1230     }
1231 
1232     if (!Overflows) {
1233       // Note that we do not check that the typesize is scalable, as vscale >= 1
1234       // so noalias still holds so long as the dependency distance is at least
1235       // as big as the typesize.
1236       if (VLeftSize.hasValue() &&
1237           Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
1238         return AliasResult::NoAlias;
1239     }
1240   }
1241 
1242   // Bail on analysing scalable LocationSize
1243   if (V1Size.isScalable() || V2Size.isScalable())
1244     return AliasResult::MayAlias;
1245 
1246   // We need to know both access sizes for all the following heuristics.
1247   if (!V1Size.hasValue() || !V2Size.hasValue())
1248     return AliasResult::MayAlias;
1249 
1250   APInt GCD;
1251   ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1252   for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1253     const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1254     const APInt &Scale = Index.Scale;
1255     APInt ScaleForGCD = Scale;
1256     if (!Index.IsNSW)
1257       ScaleForGCD =
1258           APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
1259 
1260     if (i == 0)
1261       GCD = ScaleForGCD.abs();
1262     else
1263       GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1264 
1265     ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1266                                             true, &AC, Index.CxtI);
1267     KnownBits Known =
1268         computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1269     CR = CR.intersectWith(
1270         ConstantRange::fromKnownBits(Known, /* Signed */ true),
1271         ConstantRange::Signed);
1272     CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1273 
1274     assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1275            "Bit widths are normalized to MaxIndexSize");
1276     if (Index.IsNSW)
1277       CR = CR.smul_sat(ConstantRange(Scale));
1278     else
1279       CR = CR.smul_fast(ConstantRange(Scale));
1280 
1281     if (Index.IsNegated)
1282       OffsetRange = OffsetRange.sub(CR);
1283     else
1284       OffsetRange = OffsetRange.add(CR);
1285   }
1286 
1287   // We now have accesses at two offsets from the same base:
1288   //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1289   //  2. 0 with size V2Size
1290   // Using arithmetic modulo GCD, the accesses are at
1291   // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1292   // into the range [V2Size..GCD), then we know they cannot overlap.
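  //
  // For example (illustrative values), with GCD = 8, ModOffset = 4, V1Size = 2
  // and V2Size = 4, the accesses cover [4..6) and [0..4) modulo 8, so they
  // cannot overlap.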
1293   APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1294   if (ModOffset.isNegative())
1295     ModOffset += GCD; // We want mod, not rem.
1296   if (ModOffset.uge(V2Size.getValue()) &&
1297       (GCD - ModOffset).uge(V1Size.getValue()))
1298     return AliasResult::NoAlias;
1299 
1300   // Compute ranges of potentially accessed bytes for both accesses. If the
1301   // intersection is empty, there can be no overlap.
1302   unsigned BW = OffsetRange.getBitWidth();
1303   ConstantRange Range1 = OffsetRange.add(
1304       ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1305   ConstantRange Range2 =
1306       ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1307   if (Range1.intersectWith(Range2).isEmptySet())
1308     return AliasResult::NoAlias;
1309 
1310   // Try to determine the range of values for VarIndex such that
1311   // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1312   std::optional<APInt> MinAbsVarIndex;
1313   if (DecompGEP1.VarIndices.size() == 1) {
1314     // VarIndex = Scale*V.
1315     const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1316     if (Var.Val.TruncBits == 0 &&
1317         isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
1318       // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1319       // potentially wrapping math.
1320       auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1321         if (Var.IsNSW)
1322           return true;
1323 
1324         int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1325         // Check whether Scale is small enough that abs(V*Scale) >= abs(Scale) holds:
1326         // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1327         // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
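             // For example (illustrative numbers): if V is an i8 extended to a
             // 64-bit index, ValOrigBW is 8 and MaxScaleValueBW is 56, so any
             // Scale up to 2^56 - 1 keeps |V| * Scale below 2^64.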
1328         int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1329         if (MaxScaleValueBW <= 0)
1330           return false;
1331         return Var.Scale.ule(
1332             APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1333       };
1334       // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1335       // presence of potentially wrapping math.
1336       if (MultiplyByScaleNoWrap(Var)) {
1337         // If V != 0 then abs(VarIndex) >= abs(Scale).
1338         MinAbsVarIndex = Var.Scale.abs();
1339       }
1340     }
1341   } else if (DecompGEP1.VarIndices.size() == 2) {
1342     // VarIndex = Scale*V0 + (-Scale)*V1.
1343     // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1344     // Check that MayBeCrossIteration is false, to avoid reasoning about
1345     // inequality of values across loop iterations.
1346     const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1347     const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1348     if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1349         Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
1350         isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1351                         DT))
1352       MinAbsVarIndex = Var0.Scale.abs();
1353   }
1354 
1355   if (MinAbsVarIndex) {
1356     // The constant offset will have added at least +/-MinAbsVarIndex to it.
1357     APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1358     APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1359     // We know that Offset <= OffsetLo || Offset >= OffsetHi
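         // For illustration (made-up values): Offset == -2 and MinAbsVarIndex == 8
         // give OffsetLo == -10 and OffsetHi == 6, so the check below proves
         // NoAlias whenever V1Size <= 10 and V2Size <= 6.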
1360     if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1361         OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1362       return AliasResult::NoAlias;
1363   }
1364 
1365   if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1366     return AliasResult::NoAlias;
1367 
1368   // Statically, we can see that the base objects are the same, but the
1369   // pointers have dynamic offsets which we can't resolve. And none of our
1370   // little tricks above worked.
1371   return AliasResult::MayAlias;
1372 }
1373 
1374 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1375   // If the results agree, take it.
1376   if (A == B)
1377     return A;
1378   // A mix of PartialAlias and MustAlias is PartialAlias.
1379   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1380       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1381     return AliasResult::PartialAlias;
1382   // Otherwise, we don't know anything.
1383   return AliasResult::MayAlias;
1384 }
1385 
1386 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1387 /// against another.
1388 AliasResult
1389 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1390                            const Value *V2, LocationSize V2Size,
1391                            AAQueryInfo &AAQI) {
1392   // If the values are Selects with the same condition, we can do a more precise
1393   // check: just check for aliases between the values on corresponding arms.
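       // A sketch of the pattern this handles (hypothetical IR):
       //   %p = select i1 %c, ptr %a, ptr %b
       //   %q = select i1 %c, ptr %x, ptr %y
       // alias(%p, %q) then reduces to merging alias(%a, %x) and alias(%b, %y).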
1394   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1395     if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1396                                       AAQI)) {
1397       AliasResult Alias =
1398           AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1399                          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1400       if (Alias == AliasResult::MayAlias)
1401         return AliasResult::MayAlias;
1402       AliasResult ThisAlias =
1403           AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1404                          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1405       return MergeAliasResults(ThisAlias, Alias);
1406     }
1407 
1408   // If both arms of the Select node NoAlias or MustAlias V2, then we return
1409   // NoAlias / MustAlias. Otherwise, we return MayAlias.
1410   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1411                                      MemoryLocation(V2, V2Size), AAQI);
1412   if (Alias == AliasResult::MayAlias)
1413     return AliasResult::MayAlias;
1414 
1415   AliasResult ThisAlias =
1416       AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1417                      MemoryLocation(V2, V2Size), AAQI);
1418   return MergeAliasResults(ThisAlias, Alias);
1419 }
1420 
1421 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1422 /// another.
1423 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1424                                     const Value *V2, LocationSize V2Size,
1425                                     AAQueryInfo &AAQI) {
1426   if (!PN->getNumIncomingValues())
1427     return AliasResult::NoAlias;
1428   // If the values are PHIs in the same block, we can do a more precise
1429   // as well as efficient check: just check for aliases between the values
1430   // on corresponding edges.
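       // A sketch of the pattern this handles (hypothetical IR, both phis in the
       // same block):
       //   %p = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
       //   %q = phi ptr [ %x, %bb1 ], [ %y, %bb2 ]
       // alias(%p, %q) then reduces to merging alias(%a, %x) and alias(%b, %y).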
1431   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1432     if (PN2->getParent() == PN->getParent()) {
1433       std::optional<AliasResult> Alias;
1434       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1435         AliasResult ThisAlias = AAQI.AAR.alias(
1436             MemoryLocation(PN->getIncomingValue(i), PNSize),
1437             MemoryLocation(
1438                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1439             AAQI);
1440         if (Alias)
1441           *Alias = MergeAliasResults(*Alias, ThisAlias);
1442         else
1443           Alias = ThisAlias;
1444         if (*Alias == AliasResult::MayAlias)
1445           break;
1446       }
1447       return *Alias;
1448     }
1449 
1450   SmallVector<Value *, 4> V1Srcs;
1451   // If a phi operand recurses back to the phi, we can still determine NoAlias
1452   // if we don't alias the underlying objects of the other phi operands, as we
1453   // know that the recursive phi needs to be based on them in some way.
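       // A typical instance is a simple pointer induction variable (hypothetical
       // IR):
       //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
       //   %p.next = getelementptr i8, ptr %p, i64 4
       // getUnderlyingObject(%p.next) is %p itself, so %p.next is treated as
       // recursive and only %base is compared against the other pointer.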
1454   bool isRecursive = false;
1455   auto CheckForRecPhi = [&](Value *PV) {
1456     if (!EnableRecPhiAnalysis)
1457       return false;
1458     if (getUnderlyingObject(PV) == PN) {
1459       isRecursive = true;
1460       return true;
1461     }
1462     return false;
1463   };
1464 
1465   SmallPtrSet<Value *, 4> UniqueSrc;
1466   Value *OnePhi = nullptr;
1467   for (Value *PV1 : PN->incoming_values()) {
1468     // Skip the phi itself being the incoming value.
1469     if (PV1 == PN)
1470       continue;
1471 
1472     if (isa<PHINode>(PV1)) {
1473       if (OnePhi && OnePhi != PV1) {
1474         // To control potential compile time explosion, we choose to be
1475         // conservative when we have more than one Phi input.  It is important
1476         // that we handle the single phi case as that lets us handle LCSSA
1477         // phi nodes and (combined with the recursive phi handling) simple
1478         // pointer induction variable patterns.
1479         return AliasResult::MayAlias;
1480       }
1481       OnePhi = PV1;
1482     }
1483 
1484     if (CheckForRecPhi(PV1))
1485       continue;
1486 
1487     if (UniqueSrc.insert(PV1).second)
1488       V1Srcs.push_back(PV1);
1489   }
1490 
1491   if (OnePhi && UniqueSrc.size() > 1)
1492     // Out of an abundance of caution, allow only the trivial lcssa and
1493     // recursive phi cases.
1494     return AliasResult::MayAlias;
1495 
1496   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1497   // value. This should only be possible in blocks unreachable from the entry
1498   // block, but return MayAlias just in case.
1499   if (V1Srcs.empty())
1500     return AliasResult::MayAlias;
1501 
1502   // If this PHI node is recursive, indicate that the pointer may be moved
1503   // across iterations. We can only prove NoAlias if different underlying
1504   // objects are involved.
1505   if (isRecursive)
1506     PNSize = LocationSize::beforeOrAfterPointer();
1507 
1508   // In the recursive alias queries below, we may compare values from two
1509   // different loop iterations.
1510   SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1511 
1512   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1513                                      MemoryLocation(V2, V2Size), AAQI);
1514 
1515   // Early exit if the check of the first PHI source against V2 is MayAlias.
1516   // Other results are not possible.
1517   if (Alias == AliasResult::MayAlias)
1518     return AliasResult::MayAlias;
1519   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1520   // remain valid for all elements, so we conservatively return MayAlias.
1521   if (isRecursive && Alias != AliasResult::NoAlias)
1522     return AliasResult::MayAlias;
1523 
1524   // If all sources of the PHI node NoAlias or MustAlias V2, then we return
1525   // NoAlias / MustAlias. Otherwise, we return MayAlias.
1526   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1527     Value *V = V1Srcs[i];
1528 
1529     AliasResult ThisAlias = AAQI.AAR.alias(
1530         MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1531     Alias = MergeAliasResults(ThisAlias, Alias);
1532     if (Alias == AliasResult::MayAlias)
1533       break;
1534   }
1535 
1536   return Alias;
1537 }
1538 
1539 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1540 /// array references.
1541 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1542                                       const Value *V2, LocationSize V2Size,
1543                                       AAQueryInfo &AAQI,
1544                                       const Instruction *CtxI) {
1545   // If either of the memory references is empty, it doesn't matter what the
1546   // pointer values are.
1547   if (V1Size.isZero() || V2Size.isZero())
1548     return AliasResult::NoAlias;
1549 
1550   // Strip off any casts if they exist.
1551   V1 = V1->stripPointerCastsForAliasAnalysis();
1552   V2 = V2->stripPointerCastsForAliasAnalysis();
1553 
1554   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1555   // value for undef that aliases nothing in the program.
1556   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1557     return AliasResult::NoAlias;
1558 
1559   // Are we checking for alias of the same value?
1560   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1561   // different iterations. We must therefore make sure that this is not the
1562   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1563   // happen by looking at the visited phi nodes and making sure they cannot
1564   // reach the value.
1565   if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1566     return AliasResult::MustAlias;
1567 
1568   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1569     return AliasResult::NoAlias; // Scalars cannot alias each other
1570 
1571   // Figure out what objects these things are pointing to if we can.
1572   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1573   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1574 
1575   // Null values in the default address space don't point to any object, so they
1576   // don't alias any other pointer.
1577   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1578     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1579       return AliasResult::NoAlias;
1580   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1581     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1582       return AliasResult::NoAlias;
1583 
1584   if (O1 != O2) {
1585     // If V1/V2 point to two different objects, we know that we have no alias.
1586     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1587       return AliasResult::NoAlias;
1588 
1589     // Function arguments can't alias with things that are known to be
1590     // unambiguously identified at the function level.
1591     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1592         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1593       return AliasResult::NoAlias;
1594 
1595     // If one pointer is the result of a call/invoke or load and the other is a
1596     // non-escaping local object within the same function, then we know the
1597     // object couldn't escape to a point where the call could return it.
1598     //
1599     // Note that if the pointers are in different functions, there are a
1600     // variety of complications. A call with a nocapture argument may still
1601     // temporarily store the nocapture argument's value in a temporary memory
1602     // location if that memory location doesn't escape. Or it may pass a
1603     // nocapture value to other functions as long as they don't capture it.
1604     if (isEscapeSource(O1) && AAQI.CI->isNotCapturedBefore(
1605                                   O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
1606       return AliasResult::NoAlias;
1607     if (isEscapeSource(O2) && AAQI.CI->isNotCapturedBefore(
1608                                   O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
1609       return AliasResult::NoAlias;
1610   }
1611 
1612   // If the size of one access is larger than the entire object on the other
1613   // side, then we know such behavior is undefined and can assume no alias.
1614   bool NullIsValidLocation = NullPointerIsDefined(&F);
1615   if ((isObjectSmallerThan(
1616           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1617           TLI, NullIsValidLocation)) ||
1618       (isObjectSmallerThan(
1619           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1620           TLI, NullIsValidLocation)))
1621     return AliasResult::NoAlias;
1622 
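       // Look for "separate_storage" assume operand bundles, e.g. (hypothetical
       // IR, as produced by Clang's __builtin_assume_separate_storage):
       //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
       // If O1 and O2 match the underlying objects of the two hint operands and
       // the assume is valid at the query context, the pointers cannot alias.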
1623   if (EnableSeparateStorageAnalysis) {
1624     for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1625       if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1626         continue;
1627 
1628       AssumeInst *Assume = cast<AssumeInst>(Elem);
1629       OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
1630       if (OBU.getTagName() == "separate_storage") {
1631         assert(OBU.Inputs.size() == 2);
1632         const Value *Hint1 = OBU.Inputs[0].get();
1633         const Value *Hint2 = OBU.Inputs[1].get();
1634         // This is often a no-op; instcombine rewrites this for us. No-op
1635         // getUnderlyingObject calls are fast, though.
1636         const Value *HintO1 = getUnderlyingObject(Hint1);
1637         const Value *HintO2 = getUnderlyingObject(Hint2);
1638 
1639         DominatorTree *DT = getDT(AAQI);
1640         auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1641           if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1642             return isValidAssumeForContext(Assume, PtrI, DT,
1643                                            /* AllowEphemerals */ true);
1644           }
1645           if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1646             const Instruction *FirstI =
1647                 &*PtrA->getParent()->getEntryBlock().begin();
1648             return isValidAssumeForContext(Assume, FirstI, DT,
1649                                            /* AllowEphemerals */ true);
1650           }
1651           return false;
1652         };
1653 
1654         if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1655           // Note that we go back to V1 and V2 for the
1656           // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1657           // so strictly more assumptions are valid for them.
1658           if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
1659                                                /* AllowEphemerals */ true)) ||
1660               ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1661             return AliasResult::NoAlias;
1662           }
1663         }
1664       }
1665     }
1666   }
1667 
1668   // If one of the accesses may be before the accessed pointer, canonicalize this
1669   // by using unknown after-pointer sizes for both accesses. This is
1670   // equivalent, because regardless of which pointer is lower, one of them
1671   // will always come after the other, as long as the underlying objects aren't
1672   // disjoint. We do this so that the rest of BasicAA does not have to deal
1673   // with accesses before the base pointer, and to improve cache utilization by
1674   // merging equivalent states.
1675   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1676     V1Size = LocationSize::afterPointer();
1677     V2Size = LocationSize::afterPointer();
1678   }
1679 
1680   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1681   // for recursive queries. For this reason, this limit is chosen to be large
1682   // enough to be very rarely hit, while still being small enough to avoid
1683   // stack overflows.
1684   if (AAQI.Depth >= 512)
1685     return AliasResult::MayAlias;
1686 
1687   // Check the cache before climbing up use-def chains. This also terminates
1688   // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1689   // cache key, because some cases where MayBeCrossIteration==false returns
1690   // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
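       // For example, a query that reaches itself again through a phi cycle will
       // hit the provisional NoAlias entry inserted below; that use is counted in
       // NumAssumptionUses so the result can be downgraded, and any dependent
       // cache entries purged, if the assumption is later disproven.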
1691   AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1692                             {V2, V2Size, AAQI.MayBeCrossIteration});
1693   const bool Swapped = V1 > V2;
1694   if (Swapped)
1695     std::swap(Locs.first, Locs.second);
1696   const auto &Pair = AAQI.AliasCache.try_emplace(
1697       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1698   if (!Pair.second) {
1699     auto &Entry = Pair.first->second;
1700     if (!Entry.isDefinitive()) {
1701       // Remember that we used an assumption. This may either be a direct use
1702       // of an assumption, or a use of an entry that may itself be based on an
1703       // assumption.
1704       ++AAQI.NumAssumptionUses;
1705       if (Entry.isAssumption())
1706         ++Entry.NumAssumptionUses;
1707     }
1708     // Cache contains sorted {V1,V2} pairs but we should return original order.
1709     auto Result = Entry.Result;
1710     Result.swap(Swapped);
1711     return Result;
1712   }
1713 
1714   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1715   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1716   AliasResult Result =
1717       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1718 
1719   auto It = AAQI.AliasCache.find(Locs);
1720   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1721   auto &Entry = It->second;
1722 
1723   // Check whether a NoAlias assumption has been used, but disproven.
1724   bool AssumptionDisproven =
1725       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1726   if (AssumptionDisproven)
1727     Result = AliasResult::MayAlias;
1728 
1729   // This is a definitive result now, when considered as a root query.
1730   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1731   Entry.Result = Result;
1732   // Cache contains sorted {V1,V2} pairs.
1733   Entry.Result.swap(Swapped);
1734 
1735   // If the assumption has been disproven, remove any results that may have
1736   // been based on this assumption. Do this after the Entry updates above to
1737   // avoid iterator invalidation.
1738   if (AssumptionDisproven)
1739     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1740       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1741 
1742   // The result may still be based on assumptions higher up in the chain.
1743   // Remember it, so it can be purged from the cache later.
1744   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1745       Result != AliasResult::MayAlias) {
1746     AAQI.AssumptionBasedResults.push_back(Locs);
1747     Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
1748   } else {
1749     Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1750   }
1751 
1752   // Depth is incremented before this function is called, so Depth==1 indicates
1753   // a root query.
1754   if (AAQI.Depth == 1) {
1755     // Any remaining assumption based results must be based on proven
1756     // assumptions, so convert them to definitive results.
1757     for (const auto &Loc : AAQI.AssumptionBasedResults) {
1758       auto It = AAQI.AliasCache.find(Loc);
1759       if (It != AAQI.AliasCache.end())
1760         It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1761     }
1762     AAQI.AssumptionBasedResults.clear();
1763     AAQI.NumAssumptionUses = 0;
1764   }
1765   return Result;
1766 }
1767 
1768 AliasResult BasicAAResult::aliasCheckRecursive(
1769     const Value *V1, LocationSize V1Size,
1770     const Value *V2, LocationSize V2Size,
1771     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1772   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1773     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1774     if (Result != AliasResult::MayAlias)
1775       return Result;
1776   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1777     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1778     Result.swap();
1779     if (Result != AliasResult::MayAlias)
1780       return Result;
1781   }
1782 
1783   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1784     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1785     if (Result != AliasResult::MayAlias)
1786       return Result;
1787   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1788     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1789     Result.swap();
1790     if (Result != AliasResult::MayAlias)
1791       return Result;
1792   }
1793 
1794   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1795     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1796     if (Result != AliasResult::MayAlias)
1797       return Result;
1798   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1799     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1800     Result.swap();
1801     if (Result != AliasResult::MayAlias)
1802       return Result;
1803   }
1804 
1805   // If both pointers are pointing into the same object and one of them
1806   // accesses the entire object, then the accesses must overlap in some way.
1807   if (O1 == O2) {
1808     bool NullIsValidLocation = NullPointerIsDefined(&F);
1809     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1810         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1811          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1812       return AliasResult::PartialAlias;
1813   }
1814 
1815   return AliasResult::MayAlias;
1816 }
1817 
1818 /// Check whether two Values can be considered equivalent.
1819 ///
1820 /// If the values may come from different cycle iterations, this will also
1821 /// check that the values are not part of a cycle. We have to do this because we
1822 /// are looking through phi nodes, that is we say
1823 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1824 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1825                                                   const Value *V2,
1826                                                   const AAQueryInfo &AAQI) {
1827   if (V != V2)
1828     return false;
1829 
1830   if (!AAQI.MayBeCrossIteration)
1831     return true;
1832 
1833   // Non-instructions and instructions in the entry block cannot be part of
1834   // a loop.
1835   const Instruction *Inst = dyn_cast<Instruction>(V);
1836   if (!Inst || Inst->getParent()->isEntryBlock())
1837     return true;
1838 
1839   return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
1840 }
1841 
1842 /// Computes the symbolic difference between two de-composed GEPs.
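     ///
     /// For example (made-up values): subtracting {Offset = 4, 4*%x} from
     /// {Offset = 8, 4*%x} leaves {Offset = 4} with no variable indices, whereas
     /// subtracting {Offset = 0, 4*%y} from it leaves {Offset = 8, 4*%x, -(4*%y)}.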
1843 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1844                                            const DecomposedGEP &SrcGEP,
1845                                            const AAQueryInfo &AAQI) {
1846   DestGEP.Offset -= SrcGEP.Offset;
1847   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1848     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1849     // than a few variable indices.
1850     bool Found = false;
1851     for (auto I : enumerate(DestGEP.VarIndices)) {
1852       VariableGEPIndex &Dest = I.value();
1853       if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1854            !areBothVScale(Dest.Val.V, Src.Val.V)) ||
1855           !Dest.Val.hasSameCastsAs(Src.Val))
1856         continue;
1857 
1858       // Normalize IsNegated if we're going to lose the NSW flag anyway.
1859       if (Dest.IsNegated) {
1860         Dest.Scale = -Dest.Scale;
1861         Dest.IsNegated = false;
1862         Dest.IsNSW = false;
1863       }
1864 
1865       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1866       // goes to zero, remove the entry.
1867       if (Dest.Scale != Src.Scale) {
1868         Dest.Scale -= Src.Scale;
1869         Dest.IsNSW = false;
1870       } else {
1871         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1872       }
1873       Found = true;
1874       break;
1875     }
1876 
1877     // If we didn't consume this entry, add it to the end of the Dest list.
1878     if (!Found) {
1879       VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1880                                 /* IsNegated */ true};
1881       DestGEP.VarIndices.push_back(Entry);
1882     }
1883   }
1884 }
1885 
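     /// Heuristic for two variable GEP indices that cancel except for a constant
     /// difference. As a sketch with hypothetical values: if Var0 is zext(%x + 1)
     /// scaled by 4 and Var1 is zext(%x) scaled by -4, the two pointers always
     /// differ by at least 4 bytes in one direction or the other, so 4-byte or
     /// smaller accesses with no remaining constant offset cannot overlap.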
1886 bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1887                                             LocationSize MaybeV1Size,
1888                                             LocationSize MaybeV2Size,
1889                                             AssumptionCache *AC,
1890                                             DominatorTree *DT,
1891                                             const AAQueryInfo &AAQI) {
1892   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1893       !MaybeV2Size.hasValue())
1894     return false;
1895 
1896   const uint64_t V1Size = MaybeV1Size.getValue();
1897   const uint64_t V2Size = MaybeV2Size.getValue();
1898 
1899   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1900 
1901   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1902       !Var0.hasNegatedScaleOf(Var1) ||
1903       Var0.Val.V->getType() != Var1.Val.V->getType())
1904     return false;
1905 
1906   // We'll strip off the Extensions of Var0 and Var1 and do another round
1907   // of GetLinearExpression decomposition. In the example above, if Var0
1908   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1909 
1910   LinearExpression E0 =
1911       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1912   LinearExpression E1 =
1913       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1914   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1915       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1916     return false;
1917 
1918   // We have a hit - Var0 and Var1 only differ by a constant offset!
1919 
1920   // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1921   // Var1 is possible to calculate, but we're just interested in the absolute
1922   // minimum difference between the two. The minimum distance may occur due to
1923   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1924   // the minimum distance between %i and %i + 5 is 3.
1925   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1926   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1927   APInt MinDiffBytes =
1928     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1929 
1930   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1931   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1932   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1933   // V2Size can fit in the MinDiffBytes gap.
1934   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1935          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1936 }
1937 
1938 //===----------------------------------------------------------------------===//
1939 // BasicAliasAnalysis Pass
1940 //===----------------------------------------------------------------------===//
1941 
1942 AnalysisKey BasicAA::Key;
1943 
1944 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1945   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1946   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1947   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1948   return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
1949 }
1950 
1951 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1952   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1953 }
1954 
1955 char BasicAAWrapperPass::ID = 0;
1956 
1957 void BasicAAWrapperPass::anchor() {}
1958 
1959 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1960                       "Basic Alias Analysis (stateless AA impl)", true, true)
1961 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1962 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1963 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1964 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1965                     "Basic Alias Analysis (stateless AA impl)", true, true)
1966 
1967 FunctionPass *llvm::createBasicAAWrapperPass() {
1968   return new BasicAAWrapperPass();
1969 }
1970 
1971 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1972   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1973   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1974   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1975 
1976   Result.reset(new BasicAAResult(F.getDataLayout(), F,
1977                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1978                                  &DTWP.getDomTree()));
1979 
1980   return false;
1981 }
1982 
1983 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1984   AU.setPreservesAll();
1985   AU.addRequiredTransitive<AssumptionCacheTracker>();
1986   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1987   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1988 }
1989