//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));
/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. Reaching the limit reduces the precision of basic alias
/// analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, as it has no state.
  // But we need to check that the analyses it depends on have been. Note that
  // we may be created without handles to some analyses, in which case we
  // don't depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the following
  // contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100);
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //    - either rewind the pointer q to the base address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer points
  //      to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we make use of the
/// query location size and of the fact that null pointers cannot alias here.
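/// E.g. (illustrative, hypothetical IR): for `ptr dereferenceable(16) %p`
/// and a precise 4-byte access through %p, the minimal extent is
/// max(16, 4) = 16 bytes, since all 16 bytes are known dereferenceable.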
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
    V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

/// Return true if both V1 and V2 are vscale intrinsic calls.
static bool areBothVScale(const Value *V1, const Value *V2) {
  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
         PatternMatch::match(V2, PatternMatch::m_VScale());
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
                                            const Instruction *I, bool OrAt) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(DT.getRoot()->getParent()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  // No context instruction means any use is capturing.
  if (!I)
    return false;

  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }

  return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
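/// For example (illustrative): V of type i64 with TruncBits=32, SExtBits=16
/// and ZExtBits=16 models zext i48 (sext i32 (trunc i64 V)) to i64;
/// getBitWidth() is then 64 - 32 + 16 + 16 = 64.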
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here.
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          ConstantRange(APInt::getZero(N.getBitWidth()),
                        APInt::getSignedMinValue(N.getBitWidth())));
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has an nneg zext then the sext/zext bits are
    // interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW.
  bool IsNUW;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
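    // E.g. (i8, illustrative): X=100, Y=-100, Z=2. (X +nsw Y)*Z = 0 does not
    // overflow, but the distributed form needs X*Z = 200, which wraps i8;
    // hence NSW is only kept when the offset is zero or Z is one.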
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
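/// For example (illustrative, hypothetical IR):
///   %a = shl nuw nsw i32 %x, 2   ; A = 4, B = 0
///   %b = add nuw nsw i32 %a, 12  ; A = 4, B = 12
/// Analyzing %b yields Scale=4 and Offset=12 over V=%x, with IsNUW/IsNSW set.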
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true, true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we handle is 'or', and only when it is both
      // nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint.  Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NUW, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
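/// E.g. (illustrative): a 64-bit Offset of 0x00000000ffffffff with a 32-bit
/// IndexSize is shifted left by 32 and then arithmetically shifted back,
/// yielding -1; i.e. the offset is sign-extended from the index width.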
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Nowrap flags common to all GEP operations involved in expression.
  GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
       << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
       << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
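///
/// For example (illustrative, hypothetical IR, 64-bit index size):
///   %gep = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
/// decomposes into Base = %base, Offset = 0, and one VarIndex with V = %i
/// and Scale = 4.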
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator cases we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returned attribute), it is crucial
        // to use this function because it should be in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended to the index size; if it is larger, it is truncated.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), NUW, NUSW);
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merge.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
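    // E.g. (illustrative, hypothetical IR): given
    //   define void @f(ptr noalias readonly %p)
    // memory reached through %p cannot change while @f executes, so the Mod
    // bit can be dropped for locations based on %p.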
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
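  // E.g. (illustrative, hypothetical IR): a `tail call void @g(ptr %p)`
  // cannot access a non-byval alloca of the caller, since the caller's frame
  // may already be gone by the time @g runs.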
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained, but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Return true if we know V to be the base address of the corresponding
/// memory object.  This implies that any address less than V must be out of
/// bounds for the underlying object.  Note that just being isIdentifiedObject()
/// is not enough - for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t the actual underlying object.
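/// E.g. (illustrative, hypothetical IR): a callee parameter `ptr noalias %p`
/// may point into the middle of a caller-side buffer, so
/// `getelementptr i8, ptr %p, i64 -4` can still be in bounds of the
/// underlying object.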
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Results of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately; no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally estimated as
          // GEP1-V2, but AliasResult contains the shift representing
          // GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
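  // E.g. (illustrative): two 16-byte accesses separated by 16*vscale bytes
  // cannot overlap, because 16*vscale u>= 16 holds for every vscale >= 1
  // (assuming the scaled offset does not wrap).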
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known not to overflow; if it is not known,
    // attempt to prove it using the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(Scale, Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }

  // If the difference between pointers is Offset +<nuw> Indices then we know
  // that the addition does not wrap the pointer index type (add nuw) and the
  // constant Offset is a lower bound on the distance between the pointers. We
  // can then prove NoAlias via Offset u>= VLeftSize.
  //    +                +                     +
  //    | BaseOffset     |   +<nuw> Indices    |
  //    ---------------->|-------------------->|
  //    |-->V2Size       |                     |-------> V1Size
  //   LHS                                    RHS
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(V2Size.getValue()))
    return AliasResult::NoAlias;

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
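  // E.g. (illustrative): GCD=8 and Offset=10 give ModOffset=2; with V2Size=2
  // and V1Size=4 the accesses lie in [2..6) and [0..2) modulo 8, so they
  // cannot overlap.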
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

1327   // Compute ranges of potentially accessed bytes for both accesses. If the
1328   // intersection is empty, there can be no overlap.
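       // For example (illustrative): if OffsetRange is [64, 128) and V1Size is
       // 4, the first access only touches bytes in [64, 131), while V2Size == 8
       // confines the second access to [0, 8); the ranges are disjoint.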
1329   unsigned BW = OffsetRange.getBitWidth();
1330   ConstantRange Range1 = OffsetRange.add(
1331       ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1332   ConstantRange Range2 =
1333       ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1334   if (Range1.intersectWith(Range2).isEmptySet())
1335     return AliasResult::NoAlias;
1336 
1337   // Try to determine the range of values for VarIndex such that
1338   // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1339   std::optional<APInt> MinAbsVarIndex;
1340   if (DecompGEP1.VarIndices.size() == 1) {
1341     // VarIndex = Scale*V.
1342     const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1343     if (Var.Val.TruncBits == 0 &&
1344         isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
1345       // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1346       // potentially wrapping math.
1347       auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1348         if (Var.IsNSW)
1349           return true;
1350 
1351         int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1352         // Check whether Scale is small enough that abs(V*Scale) >= abs(Scale) holds.
1353         // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1354         // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
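             // For example (illustrative): an i8 value feeding a 64-bit index
             // has ValOrigBW == 8, so any Scale of at most 2^56 - 1 keeps
             // abs(V) * Scale below 2^64 and the product cannot wrap.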
1355         int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1356         if (MaxScaleValueBW <= 0)
1357           return false;
1358         return Var.Scale.ule(
1359             APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1360       };
1361       // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1362       // presence of potentially wrapping math.
1363       if (MultiplyByScaleNoWrap(Var)) {
1364         // If V != 0 then abs(VarIndex) >= abs(Scale).
1365         MinAbsVarIndex = Var.Scale.abs();
1366       }
1367     }
1368   } else if (DecompGEP1.VarIndices.size() == 2) {
1369     // VarIndex = Scale*V0 + (-Scale)*V1.
1370     // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1371     // Check that MayBeCrossIteration is false, to avoid reasoning about
1372     // inequality of values across loop iterations.
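         // For example (illustrative): for &A[i] and &A[j] with 4-byte
         // elements, the difference decomposes to 4*i + (-4)*j; if i != j (and
         // the scaling does not overflow), the pointers are at least 4 bytes
         // apart.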
1373     const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1374     const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1375     if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1376         Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
1377         isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1378                         DT))
1379       MinAbsVarIndex = Var0.Scale.abs();
1380   }
1381 
1382   if (MinAbsVarIndex) {
1383     // The constant offset will have had at least +/-MinAbsVarIndex added to it.
1384     APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1385     APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1386     // We know that Offset <= OffsetLo || Offset >= OffsetHi
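         // For example (illustrative): with Offset == 0, MinAbsVarIndex == 4
         // and two 4-byte accesses, the GEP lies at least 4 bytes below or
         // above the other pointer, so accesses to &A[i] (i != 0) and &A[0]
         // cannot overlap.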
1387     if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1388         OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1389       return AliasResult::NoAlias;
1390   }
1391 
1392   if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1393     return AliasResult::NoAlias;
1394 
1395   // Statically, we can see that the base objects are the same, but the
1396   // pointers have dynamic offsets which we can't resolve. And none of our
1397   // little tricks above worked.
1398   return AliasResult::MayAlias;
1399 }
1400 
1401 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1402   // If the results agree, take it.
1403   if (A == B)
1404     return A;
1405   // A mix of PartialAlias and MustAlias is PartialAlias.
1406   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1407       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1408     return AliasResult::PartialAlias;
1409   // Otherwise, we don't know anything.
1410   return AliasResult::MayAlias;
1411 }
1412 
1413 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1414 /// against another.
1415 AliasResult
1416 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1417                            const Value *V2, LocationSize V2Size,
1418                            AAQueryInfo &AAQI) {
1419   // If the values are Selects with the same condition, we can do a more precise
1420   // check: just check for aliases between the values on corresponding arms.
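       // For example (illustrative), given
       //   %s1 = select i1 %c, ptr %a, ptr %b
       //   %s2 = select i1 %c, ptr %x, ptr %y
       // it suffices to compare %a with %x and %b with %y.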
1421   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1422     if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1423                                       AAQI)) {
1424       AliasResult Alias =
1425           AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1426                          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1427       if (Alias == AliasResult::MayAlias)
1428         return AliasResult::MayAlias;
1429       AliasResult ThisAlias =
1430           AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1431                          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1432       return MergeAliasResults(ThisAlias, Alias);
1433     }
1434 
1435   // If both arms of the Select node NoAlias or MustAlias V2, then return
1436   // NoAlias / MustAlias. Otherwise, return MayAlias.
1437   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1438                                      MemoryLocation(V2, V2Size), AAQI);
1439   if (Alias == AliasResult::MayAlias)
1440     return AliasResult::MayAlias;
1441 
1442   AliasResult ThisAlias =
1443       AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1444                      MemoryLocation(V2, V2Size), AAQI);
1445   return MergeAliasResults(ThisAlias, Alias);
1446 }
1447 
1448 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1449 /// another.
1450 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1451                                     const Value *V2, LocationSize V2Size,
1452                                     AAQueryInfo &AAQI) {
1453   if (!PN->getNumIncomingValues())
1454     return AliasResult::NoAlias;
1455   // If the values are PHIs in the same block, we can do a more precise
1456   // and more efficient check: just check for aliases between the values
1457   // on corresponding edges.
1458   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1459     if (PN2->getParent() == PN->getParent()) {
1460       std::optional<AliasResult> Alias;
1461       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1462         AliasResult ThisAlias = AAQI.AAR.alias(
1463             MemoryLocation(PN->getIncomingValue(i), PNSize),
1464             MemoryLocation(
1465                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1466             AAQI);
1467         if (Alias)
1468           *Alias = MergeAliasResults(*Alias, ThisAlias);
1469         else
1470           Alias = ThisAlias;
1471         if (*Alias == AliasResult::MayAlias)
1472           break;
1473       }
1474       return *Alias;
1475     }
1476 
1477   SmallVector<Value *, 4> V1Srcs;
1478   // If a phi operand recurses back to the phi, we can still determine NoAlias
1479   // if we don't alias the underlying objects of the other phi operands, as we
1480   // know that the recursive phi needs to be based on them in some way.
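       // A typical case (illustrative) is a pointer induction variable:
       //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
       //   %p.next = getelementptr i8, ptr %p, i64 4
       // where %p.next's underlying object is %p itself, so only %base needs
       // to be checked against the other pointer.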
1481   bool isRecursive = false;
1482   auto CheckForRecPhi = [&](Value *PV) {
1483     if (!EnableRecPhiAnalysis)
1484       return false;
1485     if (getUnderlyingObject(PV) == PN) {
1486       isRecursive = true;
1487       return true;
1488     }
1489     return false;
1490   };
1491 
1492   SmallPtrSet<Value *, 4> UniqueSrc;
1493   Value *OnePhi = nullptr;
1494   for (Value *PV1 : PN->incoming_values()) {
1495     // Skip the phi itself being the incoming value.
1496     if (PV1 == PN)
1497       continue;
1498 
1499     if (isa<PHINode>(PV1)) {
1500       if (OnePhi && OnePhi != PV1) {
1501         // To control potential compile time explosion, we choose to be
1502         // conservative when we have more than one Phi input.  It is important
1503         // that we handle the single phi case as that lets us handle LCSSA
1504         // phi nodes and (combined with the recursive phi handling) simple
1505         // pointer induction variable patterns.
1506         return AliasResult::MayAlias;
1507       }
1508       OnePhi = PV1;
1509     }
1510 
1511     if (CheckForRecPhi(PV1))
1512       continue;
1513 
1514     if (UniqueSrc.insert(PV1).second)
1515       V1Srcs.push_back(PV1);
1516   }
1517 
1518   if (OnePhi && UniqueSrc.size() > 1)
1519     // Out of an abundance of caution, allow only the trivial lcssa and
1520     // recursive phi cases.
1521     return AliasResult::MayAlias;
1522 
1523   // If V1Srcs is empty, then the phi has no underlying non-phi
1524   // value. This should only be possible in blocks unreachable from the entry
1525   // block, but return MayAlias just in case.
1526   if (V1Srcs.empty())
1527     return AliasResult::MayAlias;
1528 
1529   // If this PHI node is recursive, indicate that the pointer may be moved
1530   // across iterations. We can only prove NoAlias if different underlying
1531   // objects are involved.
1532   if (isRecursive)
1533     PNSize = LocationSize::beforeOrAfterPointer();
1534 
1535   // In the recursive alias queries below, we may compare values from two
1536   // different loop iterations.
1537   SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1538 
1539   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1540                                      MemoryLocation(V2, V2Size), AAQI);
1541 
1542   // Early exit if the check of the first PHI source against V2 is MayAlias.
1543   // Other results are not possible.
1544   if (Alias == AliasResult::MayAlias)
1545     return AliasResult::MayAlias;
1546   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1547   // remain valid for all elements, so we conservatively return MayAlias.
1548   if (isRecursive && Alias != AliasResult::NoAlias)
1549     return AliasResult::MayAlias;
1550 
1551   // If all sources of the PHI node NoAlias or MustAlias V2, then return
1552   // NoAlias / MustAlias. Otherwise, return MayAlias.
1553   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1554     Value *V = V1Srcs[i];
1555 
1556     AliasResult ThisAlias = AAQI.AAR.alias(
1557         MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1558     Alias = MergeAliasResults(ThisAlias, Alias);
1559     if (Alias == AliasResult::MayAlias)
1560       break;
1561   }
1562 
1563   return Alias;
1564 }
1565 
1566 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1567 /// array references.
1568 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1569                                       const Value *V2, LocationSize V2Size,
1570                                       AAQueryInfo &AAQI,
1571                                       const Instruction *CtxI) {
1572   // If either of the memory references is empty, it doesn't matter what the
1573   // pointer values are.
1574   if (V1Size.isZero() || V2Size.isZero())
1575     return AliasResult::NoAlias;
1576 
1577   // Strip off any casts if they exist.
1578   V1 = V1->stripPointerCastsForAliasAnalysis();
1579   V2 = V2->stripPointerCastsForAliasAnalysis();
1580 
1581   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1582   // value for undef that aliases nothing in the program.
1583   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1584     return AliasResult::NoAlias;
1585 
1586   // Are we checking for alias of the same value?
1587   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1588   // different iterations. We must therefore make sure that this is not the
1589   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1590   // happen by looking at the visited phi nodes and making sure they cannot
1591   // reach the value.
1592   if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1593     return AliasResult::MustAlias;
1594 
1595   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1596     return AliasResult::NoAlias; // Scalars cannot alias each other
1597 
1598   // Figure out what objects these things are pointing to if we can.
1599   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1600   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1601 
1602   // A null pointer doesn't point to any object when null is not a defined
1603   // address in its address space, so it cannot alias any other pointer.
1604   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1605     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1606       return AliasResult::NoAlias;
1607   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1608     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1609       return AliasResult::NoAlias;
1610 
1611   if (O1 != O2) {
1612     // If V1/V2 point to two different objects, we know that we have no alias.
1613     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1614       return AliasResult::NoAlias;
1615 
1616     // Function arguments can't alias with things that are known to be
1617     // unambiguously identified at the function level.
1618     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1619         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1620       return AliasResult::NoAlias;
1621 
1622     // If one pointer is the result of a call/invoke or load and the other is a
1623     // non-escaping local object within the same function, then we know the
1624     // object couldn't escape to a point where the call could return it.
1625     //
1626     // Note that if the pointers are in different functions, there are a
1627     // variety of complications. A call with a nocapture argument may still
1628     // temporarily store the nocapture argument's value in a temporary memory
1629     // location if that memory location doesn't escape. Or it may pass a
1630     // nocapture value to other functions as long as they don't capture it.
1631     if (isEscapeSource(O1) && AAQI.CI->isNotCapturedBefore(
1632                                   O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
1633       return AliasResult::NoAlias;
1634     if (isEscapeSource(O2) && AAQI.CI->isNotCapturedBefore(
1635                                   O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
1636       return AliasResult::NoAlias;
1637   }
1638 
1639   // If the size of one access is larger than the entire object on the other
1640   // side, then we know such behavior is undefined and can assume no alias.
1641   bool NullIsValidLocation = NullPointerIsDefined(&F);
1642   if ((isObjectSmallerThan(
1643           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1644           TLI, NullIsValidLocation)) ||
1645       (isObjectSmallerThan(
1646           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1647           TLI, NullIsValidLocation)))
1648     return AliasResult::NoAlias;
1649 
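       // The "separate_storage" assume operand bundle, e.g. (illustrative)
       //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
       // states that the two pointers point into disjoint storage objects; if
       // the assume is valid at the query context, we can report NoAlias.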
1650   if (EnableSeparateStorageAnalysis) {
1651     for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1652       if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1653         continue;
1654 
1655       AssumeInst *Assume = cast<AssumeInst>(Elem);
1656       OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
1657       if (OBU.getTagName() == "separate_storage") {
1658         assert(OBU.Inputs.size() == 2);
1659         const Value *Hint1 = OBU.Inputs[0].get();
1660         const Value *Hint2 = OBU.Inputs[1].get();
1661         // This is often a no-op; instcombine rewrites this for us. No-op
1662         // getUnderlyingObject calls are fast, though.
1663         const Value *HintO1 = getUnderlyingObject(Hint1);
1664         const Value *HintO2 = getUnderlyingObject(Hint2);
1665 
1666         DominatorTree *DT = getDT(AAQI);
1667         auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1668           if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1669             return isValidAssumeForContext(Assume, PtrI, DT,
1670                                            /* AllowEphemerals */ true);
1671           }
1672           if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1673             const Instruction *FirstI =
1674                 &*PtrA->getParent()->getEntryBlock().begin();
1675             return isValidAssumeForContext(Assume, FirstI, DT,
1676                                            /* AllowEphemerals */ true);
1677           }
1678           return false;
1679         };
1680 
1681         if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1682           // Note that we go back to V1 and V2 for the
1683           // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1684           // so strictly more assumptions are valid for them.
1685           if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
1686                                                /* AllowEphemerals */ true)) ||
1687               ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1688             return AliasResult::NoAlias;
1689           }
1690         }
1691       }
1692     }
1693   }
1694 
1695   // If one of the accesses may be before the accessed pointer, canonicalize this
1696   // by using unknown after-pointer sizes for both accesses. This is
1697   // equivalent, because regardless of which pointer is lower, one of them
1698   // will always come after the other, as long as the underlying objects aren't
1699   // disjoint. We do this so that the rest of BasicAA does not have to deal
1700   // with accesses before the base pointer, and to improve cache utilization by
1701   // merging equivalent states.
1702   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1703     V1Size = LocationSize::afterPointer();
1704     V2Size = LocationSize::afterPointer();
1705   }
1706 
1707   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1708   // for recursive queries. For this reason, this limit is chosen to be large
1709   // enough to be very rarely hit, while still being small enough to avoid
1710   // stack overflows.
1711   if (AAQI.Depth >= 512)
1712     return AliasResult::MayAlias;
1713 
1714   // Check the cache before climbing up use-def chains. This also terminates
1715   // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1716   // cache key, because some cases where MayBeCrossIteration==false returns
1717   // cache key, because queries that yield MustAlias or NoAlias when
1718   // MayBeCrossIteration==false may yield MayAlias when it is true.
1719                             {V2, V2Size, AAQI.MayBeCrossIteration});
1720   const bool Swapped = V1 > V2;
1721   if (Swapped)
1722     std::swap(Locs.first, Locs.second);
1723   const auto &Pair = AAQI.AliasCache.try_emplace(
1724       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1725   if (!Pair.second) {
1726     auto &Entry = Pair.first->second;
1727     if (!Entry.isDefinitive()) {
1728       // Remember that we used an assumption. This may either be a direct use
1729       // of an assumption, or a use of an entry that may itself be based on an
1730       // assumption.
1731       ++AAQI.NumAssumptionUses;
1732       if (Entry.isAssumption())
1733         ++Entry.NumAssumptionUses;
1734     }
1735     // Cache contains sorted {V1,V2} pairs but we should return the original order.
1736     auto Result = Entry.Result;
1737     Result.swap(Swapped);
1738     return Result;
1739   }
1740 
1741   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1742   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1743   AliasResult Result =
1744       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1745 
1746   auto It = AAQI.AliasCache.find(Locs);
1747   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1748   auto &Entry = It->second;
1749 
1750   // Check whether a NoAlias assumption has been used, but disproven.
1751   bool AssumptionDisproven =
1752       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1753   if (AssumptionDisproven)
1754     Result = AliasResult::MayAlias;
1755 
1756   // This is a definitive result now, when considered as a root query.
1757   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1758   Entry.Result = Result;
1759   // Cache contains sorted {V1,V2} pairs.
1760   Entry.Result.swap(Swapped);
1761 
1762   // If the assumption has been disproven, remove any results that may have
1763   // been based on this assumption. Do this after the Entry updates above to
1764   // avoid iterator invalidation.
1765   if (AssumptionDisproven)
1766     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1767       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1768 
1769   // The result may still be based on assumptions higher up in the chain.
1770   // Remember it, so it can be purged from the cache later.
1771   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1772       Result != AliasResult::MayAlias) {
1773     AAQI.AssumptionBasedResults.push_back(Locs);
1774     Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
1775   } else {
1776     Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1777   }
1778 
1779   // Depth is incremented before this function is called, so Depth==1 indicates
1780   // a root query.
1781   if (AAQI.Depth == 1) {
1782     // Any remaining assumption based results must be based on proven
1783     // assumptions, so convert them to definitive results.
1784     for (const auto &Loc : AAQI.AssumptionBasedResults) {
1785       auto It = AAQI.AliasCache.find(Loc);
1786       if (It != AAQI.AliasCache.end())
1787         It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1788     }
1789     AAQI.AssumptionBasedResults.clear();
1790     AAQI.NumAssumptionUses = 0;
1791   }
1792   return Result;
1793 }
1794 
1795 AliasResult BasicAAResult::aliasCheckRecursive(
1796     const Value *V1, LocationSize V1Size,
1797     const Value *V2, LocationSize V2Size,
1798     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1799   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1800     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1801     if (Result != AliasResult::MayAlias)
1802       return Result;
1803   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1804     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1805     Result.swap();
1806     if (Result != AliasResult::MayAlias)
1807       return Result;
1808   }
1809 
1810   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1811     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1812     if (Result != AliasResult::MayAlias)
1813       return Result;
1814   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1815     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1816     Result.swap();
1817     if (Result != AliasResult::MayAlias)
1818       return Result;
1819   }
1820 
1821   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1822     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1823     if (Result != AliasResult::MayAlias)
1824       return Result;
1825   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1826     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1827     Result.swap();
1828     if (Result != AliasResult::MayAlias)
1829       return Result;
1830   }
1831 
1832   // If both pointers are pointing into the same object and one of them
1833   // accesses the entire object, then the accesses must overlap in some way.
1834   if (O1 == O2) {
1835     bool NullIsValidLocation = NullPointerIsDefined(&F);
1836     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1837         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1838          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1839       return AliasResult::PartialAlias;
1840   }
1841 
1842   return AliasResult::MayAlias;
1843 }
1844 
1845 /// Check whether two Values can be considered equivalent.
1846 ///
1847 /// If the values may come from different cycle iterations, this will also
1848 /// check that the values are not part of a cycle. We have to do this because we
1849 /// are looking through phi nodes, that is, we say
1850 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1851 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1852                                                   const Value *V2,
1853                                                   const AAQueryInfo &AAQI) {
1854   if (V != V2)
1855     return false;
1856 
1857   if (!AAQI.MayBeCrossIteration)
1858     return true;
1859 
1860   // Non-instructions and instructions in the entry block cannot be part of
1861   // a loop.
1862   const Instruction *Inst = dyn_cast<Instruction>(V);
1863   if (!Inst || Inst->getParent()->isEntryBlock())
1864     return true;
1865 
1866   return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
1867 }
1868 
1869 /// Computes the symbolic difference between two decomposed GEPs.
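     /// For example (illustrative), subtracting {Offset = 4, +4*%i} from
     /// {Offset = 8, +4*%i} cancels the matching variable index and leaves
     /// just {Offset = 4}.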
1870 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1871                                            const DecomposedGEP &SrcGEP,
1872                                            const AAQueryInfo &AAQI) {
1873   // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
1874   // unsigned sense.
1875   if (DestGEP.Offset.ult(SrcGEP.Offset))
1876     DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1877 
1878   DestGEP.Offset -= SrcGEP.Offset;
1879   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1880     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1881     // than a few variable indices.
1882     bool Found = false;
1883     for (auto I : enumerate(DestGEP.VarIndices)) {
1884       VariableGEPIndex &Dest = I.value();
1885       if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1886            !areBothVScale(Dest.Val.V, Src.Val.V)) ||
1887           !Dest.Val.hasSameCastsAs(Src.Val))
1888         continue;
1889 
1890       // Normalize IsNegated if we're going to lose the NSW flag anyway.
1891       if (Dest.IsNegated) {
1892         Dest.Scale = -Dest.Scale;
1893         Dest.IsNegated = false;
1894         Dest.IsNSW = false;
1895       }
1896 
1897       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1898       // goes to zero, remove the entry.
1899       if (Dest.Scale != Src.Scale) {
1900         // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
1901         // unsigned sense.
1902         if (Dest.Scale.ult(Src.Scale))
1903           DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1904 
1905         Dest.Scale -= Src.Scale;
1906         Dest.IsNSW = false;
1907       } else {
1908         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1909       }
1910       Found = true;
1911       break;
1912     }
1913 
1914     // If we didn't consume this entry, add it to the end of the Dest list.
1915     if (!Found) {
1916       VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1917                                 /* IsNegated */ true};
1918       DestGEP.VarIndices.push_back(Entry);
1919 
1920       // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
1921       DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1922     }
1923   }
1924 }
1925 
1926 bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1927                                             LocationSize MaybeV1Size,
1928                                             LocationSize MaybeV2Size,
1929                                             AssumptionCache *AC,
1930                                             DominatorTree *DT,
1931                                             const AAQueryInfo &AAQI) {
1932   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1933       !MaybeV2Size.hasValue())
1934     return false;
1935 
1936   const uint64_t V1Size = MaybeV1Size.getValue();
1937   const uint64_t V2Size = MaybeV2Size.getValue();
1938 
1939   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1940 
1941   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1942       !Var0.hasNegatedScaleOf(Var1) ||
1943       Var0.Val.V->getType() != Var1.Val.V->getType())
1944     return false;
1945 
1946   // We'll strip off the Extensions of Var0 and Var1 and do another round
1947   // of GetLinearExpression decomposition. For example, if Var0
1948   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1949 
1950   LinearExpression E0 =
1951       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1952   LinearExpression E1 =
1953       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1954   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1955       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1956     return false;
1957 
1958   // We have a hit - Var0 and Var1 only differ by a constant offset!
1959 
1960   // If we've been sext'ed and then zext'd, the maximum difference between Var0
1961   // and Var1 can be calculated, but we're just interested in the absolute
1962   // minimum difference between the two. The minimum distance may occur due to
1963   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1964   // the minimum distance between %i and %i + 5 is 3.
1965   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1966   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1967   APInt MinDiffBytes =
1968     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1969 
1970   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1971   // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
1972   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1973   // V2Size can fit in the MinDiffBytes gap.
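       // For example (illustrative): with Var0 == zext(%x + 1) scaled by 4 and
       // Var1 == zext(%x) scaled by -4, MinDiffBytes is 4, so two accesses of
       // at most 4 bytes each (with a zero constant GEP.Offset) cannot overlap.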
1974   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1975          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1976 }
1977 
1978 //===----------------------------------------------------------------------===//
1979 // BasicAliasAnalysis Pass
1980 //===----------------------------------------------------------------------===//
1981 
1982 AnalysisKey BasicAA::Key;
1983 
1984 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1985   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1986   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1987   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1988   return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
1989 }
1990 
1991 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1992   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1993 }
1994 
1995 char BasicAAWrapperPass::ID = 0;
1996 
1997 void BasicAAWrapperPass::anchor() {}
1998 
1999 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
2000                       "Basic Alias Analysis (stateless AA impl)", true, true)
2001 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2002 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2003 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2004 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
2005                     "Basic Alias Analysis (stateless AA impl)", true, true)
2006 
2007 FunctionPass *llvm::createBasicAAWrapperPass() {
2008   return new BasicAAWrapperPass();
2009 }
2010 
2011 bool BasicAAWrapperPass::runOnFunction(Function &F) {
2012   auto &ACT = getAnalysis<AssumptionCacheTracker>();
2013   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2014   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2015 
2016   Result.reset(new BasicAAResult(F.getDataLayout(), F,
2017                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2018                                  &DTWP.getDomTree()));
2019 
2020   return false;
2021 }
2022 
2023 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2024   AU.setPreservesAll();
2025   AU.addRequiredTransitive<AssumptionCacheTracker>();
2026   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2027   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
2028 }
2029