xref: /llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp (revision c6c864da3fdfbe98f6302f209056fe3069d071ae)
1 //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the generic AliasAnalysis interface which is used as the
10 // common interface used by all clients and implementations of alias analysis.
11 //
12 // This file also implements the default version of the AliasAnalysis interface
13 // that is to be used when no other implementation is specified.  This does some
14 // simple tests that detect obvious cases: two different global pointers cannot
15 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
16 // etc.
17 //
18 // This alias analysis implementation really isn't very good for anything, but
19 // it is very fast, and makes a nice clean default implementation.  Because it
20 // handles lots of little corner cases, other, more complex, alias analysis
21 // implementations may choose to rely on this pass to resolve these simple and
22 // easy cases.
23 //
24 //===----------------------------------------------------------------------===//
25 
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CaptureTracking.h"
30 #include "llvm/Analysis/GlobalsModRef.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
33 #include "llvm/Analysis/ScopedNoAliasAA.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/IR/Argument.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Instruction.h"
41 #include "llvm/IR/Instructions.h"
42 #include "llvm/IR/Type.h"
43 #include "llvm/IR/Value.h"
44 #include "llvm/InitializePasses.h"
45 #include "llvm/Pass.h"
46 #include "llvm/Support/AtomicOrdering.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include <cassert>
50 #include <functional>
51 #include <iterator>
52 
53 #define DEBUG_TYPE "aa"
54 
55 using namespace llvm;
56 
// Statistics for top-level (depth-zero) alias queries; see AAResults::alias.
STATISTIC(NumNoAlias,   "Number of NoAlias results");
STATISTIC(NumMayAlias,  "Number of MayAlias results");
STATISTIC(NumMustAlias, "Number of MustAlias results");

namespace llvm {
/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));
} // namespace llvm

#ifndef NDEBUG
/// Print a trace of alias analysis queries and their results.
static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
#else
// In release builds the flag is a compile-time constant so all the tracing
// code below folds away.
static const bool EnableAATrace = false;
#endif
73 
// Construct an (initially empty) aggregation; concrete AA implementations
// are registered afterwards via addAAResult().
AAResults::AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}

// Move construction transfers the registered implementations and the
// function-analysis dependency keys; TLI is shared by reference.
AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}

AAResults::~AAResults() {}
80 
/// Decide whether this aggregation of AA results is still valid for \p F.
///
/// \returns true (invalidate) if the AAManager itself was invalidated, or if
/// any function analysis this aggregation depends on was invalidated.
bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
                           FunctionAnalysisManager::Invalidator &Inv) {
  // AAResults preserves the AAManager by default, due to the stateless nature
  // of AliasAnalysis. There is no need to check whether it has been preserved
  // explicitly. Check if any module dependency was invalidated and caused the
  // AAManager to be invalidated. Invalidate ourselves in that case.
  auto PAC = PA.getChecker<AAManager>();
  if (!PAC.preservedWhenStateless())
    return true;

  // Check if any of the function dependencies were invalidated, and invalidate
  // ourselves in that case.
  for (AnalysisKey *ID : AADeps)
    if (Inv.invalidate(ID, F, PA))
      return true;

  // Everything we depend on is still fine, so are we. Nothing to invalidate.
  return false;
}
100 
101 //===----------------------------------------------------------------------===//
102 // Default chaining methods
103 //===----------------------------------------------------------------------===//
104 
/// Convenience entry point: answer an alias query using a fresh, short-lived
/// query-info cache and no context instruction.
AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  SimpleAAQueryInfo AAQIP(*this);
  return alias(LocA, LocB, AAQIP, nullptr);
}
110 
/// Core alias query: chain through every registered AA implementation until
/// one produces a result more precise than MayAlias.
AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
                             const Instruction *CtxI) {
  AliasResult Result = AliasResult::MayAlias;

  // Optional debug trace of the query, indented by recursion depth.
  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << "\n";
  }

  // Track recursion depth: the individual AAs may issue nested alias queries
  // through this same entry point.
  AAQI.Depth++;
  for (const auto &AA : AAs) {
    Result = AA->alias(LocA, LocB, AAQI, CtxI);
    // The first implementation to improve on MayAlias decides the query.
    if (Result != AliasResult::MayAlias)
      break;
  }
  AAQI.Depth--;

  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
  }

  // Only count top-level queries in the statistics, not nested ones.
  if (AAQI.Depth == 0) {
    if (Result == AliasResult::NoAlias)
      ++NumNoAlias;
    else if (Result == AliasResult::MustAlias)
      ++NumMustAlias;
    else
      ++NumMayAlias;
  }
  return Result;
}
148 
/// Convenience entry point: compute the mod/ref mask for \p Loc with a fresh,
/// short-lived query-info cache.
ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        bool IgnoreLocals) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfoMask(Loc, AAQIP, IgnoreLocals);
}
154 
/// Intersect the mod/ref masks reported by all registered AA implementations
/// for \p Loc, starting from the conservative ModRef top of the lattice.
ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI, bool IgnoreLocals) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}
169 
/// Intersect, over all registered AA implementations, what \p Call may do to
/// the memory pointed to by its argument \p ArgIdx.
ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getArgModRefInfo(Call, ArgIdx);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}
183 
/// Convenience entry point: query how \p Call2 interacts with instruction
/// \p I, using a fresh, short-lived query-info cache.
ModRefInfo AAResults::getModRefInfo(const Instruction *I,
                                    const CallBase *Call2) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfo(I, Call2, AAQIP);
}
189 
/// Query how \p Call2 interacts with the memory accessed by instruction \p I.
/// Dispatches to the call-vs-call overload when \p I is itself a call.
ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
                                    AAQueryInfo &AAQI) {
  // We may have two calls.
  if (const auto *Call1 = dyn_cast<CallBase>(I)) {
    // Check if the two calls modify the same memory.
    return getModRefInfo(Call1, Call2, AAQI);
  }
  // If this is a fence, just return ModRef.
  if (I->isFenceLike())
    return ModRefInfo::ModRef;
  // Otherwise, check if the call modifies or references the
  // location this memory access defines.  The best we can say
  // is that if the call references what this instruction
  // defines, it must be clobbered by this location.
  const MemoryLocation DefLoc = MemoryLocation::get(I);
  ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
  // Any interaction at all is conservatively widened to ModRef.
  if (isModOrRefSet(MR))
    return ModRefInfo::ModRef;
  return ModRefInfo::NoModRef;
}
210 
/// Compute what \p Call may do (mod/ref) to the memory location \p Loc.
///
/// First intersects the answers of the registered AA implementations, then
/// refines the result further using the call's aggregate memory effects and
/// a per-argument alias scan.
ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call, Loc, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = getMemoryEffects(Call, AAQI)
                .getWithoutLoc(IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
  ModRefInfo OtherMR = ME.getWithoutLoc(IRMemLocation::ArgMem).getModRef();
  if ((ArgMR | OtherMR) != OtherMR) {
    // Refine the modref info for argument memory. We only bother to do this
    // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
    // on the final result.
    ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
    for (const auto &I : llvm::enumerate(Call->args())) {
      const Value *Arg = I.value();
      // Non-pointer arguments cannot refer to Loc.
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = I.index();
      MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI);
      AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI, Call);
      // Only arguments that may alias Loc contribute to the argument mask.
      if (ArgAlias != AliasResult::NoAlias)
        AllArgsMask |= getArgModRefInfo(Call, ArgIdx);
    }
    ArgMR &= AllArgsMask;
  }

  Result &= ArgMR | OtherMR;

  // Apply the ModRef mask. This ensures that if Loc is a constant memory
  // location, we take into account the fact that the call definitely could not
  // modify the memory location.
  if (!isNoModRef(Result))
    Result &= getModRefInfoMask(Loc);

  return Result;
}
264 
/// Compute what \p Call1 may do (mod/ref) to the memory accessed by
/// \p Call2.
///
/// Intersects the answers of the registered AA implementations and then
/// refines the result using both calls' aggregate memory effects, including
/// per-argument reasoning for argmemonly calls.
ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
                                    const CallBase *Call2, AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call1, Call2, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If Call1 or Call2 are readnone, they don't interact.
  auto Call1B = getMemoryEffects(Call1, AAQI);
  if (Call1B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  auto Call2B = getMemoryEffects(Call2, AAQI);
  if (Call2B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // If they both only read from memory, there is no dependence.
  if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
    return ModRefInfo::NoModRef;

  // If Call1 only reads memory, the only dependence on Call2 can be
  // from Call1 reading memory written by Call2.
  if (Call1B.onlyReadsMemory())
    Result &= ModRefInfo::Ref;
  else if (Call1B.onlyWritesMemory())
    Result &= ModRefInfo::Mod;

  // If Call2 only access memory through arguments, accumulate the mod/ref
  // information from Call1's references to the memory referenced by
  // Call2's arguments.
  if (Call2B.onlyAccessesArgPointees()) {
    // Argmemonly but reads/writes no argument pointees at all: no memory
    // is accessed, so there can be no dependence.
    if (!Call2B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
      auto Call2ArgLoc =
          MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);

      // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
      // dependence of Call1 on that location is the inverse:
      // - If Call2 modifies location, dependence exists if Call1 reads or
      //   writes.
      // - If Call2 only reads location, dependence exists if Call1 writes.
      ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
      ModRefInfo ArgMask = ModRefInfo::NoModRef;
      if (isModSet(ArgModRefC2))
        ArgMask = ModRefInfo::ModRef;
      else if (isRefSet(ArgModRefC2))
        ArgMask = ModRefInfo::Mod;

      // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
      // above ArgMask to update dependence info.
      ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);

      // Accumulate, clamped to Result; once we've hit Result no further
      // argument can add anything, so stop scanning.
      R = (R | ArgMask) & Result;
      if (R == Result)
        break;
    }

    return R;
  }

  // If Call1 only accesses memory through arguments, check if Call2 references
  // any of the memory referenced by Call1's arguments. If not, return NoModRef.
  if (Call1B.onlyAccessesArgPointees()) {
    if (!Call1B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
      auto Call1ArgLoc =
          MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);

      // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
      // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
      // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
      ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
      ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI);
      if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
          (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
        R = (R | ArgModRefC1) & Result;

      // Once the accumulated result reaches Result, no argument can refine
      // it further.
      if (R == Result)
        break;
    }

    return R;
  }

  return Result;
}
371 
/// Intersect the memory effects of \p Call reported by all registered AA
/// implementations, starting from the conservative "unknown" top element.
MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
                                          AAQueryInfo &AAQI) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(Call, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}
386 
/// Convenience entry point: compute \p Call's memory effects with a fresh,
/// short-lived query-info cache.
MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
  SimpleAAQueryInfo AAQI(*this);
  return getMemoryEffects(Call, AAQI);
}
391 
/// Intersect the memory effects of function \p F reported by all registered
/// AA implementations.
MemoryEffects AAResults::getMemoryEffects(const Function *F) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(F);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}
405 
406 raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
407   switch (AR) {
408   case AliasResult::NoAlias:
409     OS << "NoAlias";
410     break;
411   case AliasResult::MustAlias:
412     OS << "MustAlias";
413     break;
414   case AliasResult::MayAlias:
415     OS << "MayAlias";
416     break;
417   case AliasResult::PartialAlias:
418     OS << "PartialAlias";
419     if (AR.hasOffset())
420       OS << " (off " << AR.getOffset() << ")";
421     break;
422   }
423   return OS;
424 }
425 
426 //===----------------------------------------------------------------------===//
427 // Helper method implementation
428 //===----------------------------------------------------------------------===//
429 
/// Compute what load \p L may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI, L);
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }
  // Otherwise, a load just reads.
  return ModRefInfo::Ref;
}
447 
/// Compute what store \p S may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI, S);
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
    // This ensures that if Loc is a constant memory location, we take into
    // account the fact that the store definitely could not modify the memory
    // location.
    if (!isModSet(getModRefInfoMask(Loc)))
      return ModRefInfo::NoModRef;
  }

  // Otherwise, a store just writes.
  return ModRefInfo::Mod;
}
473 
/// Compute what fence \p S may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // All we know about a fence instruction is what we get from the ModRef
  // mask: if Loc is a constant memory location, the fence definitely could
  // not modify it.
  if (Loc.Ptr)
    return getModRefInfoMask(Loc);
  return ModRefInfo::ModRef;
}
484 
/// Compute what va_arg \p V may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI, V);
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // If the pointer is a pointer to invariant memory, then it could not have
    // been modified by this va_arg.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a va_arg reads and writes.
  return ModRefInfo::ModRef;
}
503 
504 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
505                                     const MemoryLocation &Loc,
506                                     AAQueryInfo &AAQI) {
507   if (Loc.Ptr) {
508     // If the pointer is a pointer to invariant memory,
509     // then it could not have been modified by this catchpad.
510     return getModRefInfoMask(Loc, AAQI);
511   }
512 
513   // Otherwise, a catchpad reads and writes.
514   return ModRefInfo::ModRef;
515 }
516 
/// Compute what catchret \p CatchRet may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to invariant memory,
    // then it could not have been modified by this catchret.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a catchret reads and writes.
  return ModRefInfo::ModRef;
}
529 
/// Compute what cmpxchg \p CX may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI, CX);
    // If the cmpxchg address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Conservatively, a cmpxchg both reads and writes its location.
  return ModRefInfo::ModRef;
}
547 
/// Compute what atomicrmw \p RMW may do (mod/ref) to location \p Loc.
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
  if (isStrongerThanMonotonic(RMW->getOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI, RMW);
    // If the atomicrmw address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Conservatively, an atomicrmw both reads and writes its location.
  return ModRefInfo::ModRef;
}
565 
566 ModRefInfo AAResults::getModRefInfo(const Instruction *I,
567                                     const std::optional<MemoryLocation> &OptLoc,
568                                     AAQueryInfo &AAQIP) {
569   if (OptLoc == std::nullopt) {
570     if (const auto *Call = dyn_cast<CallBase>(I))
571       return getMemoryEffects(Call, AAQIP).getModRef();
572   }
573 
574   const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation());
575 
576   switch (I->getOpcode()) {
577   case Instruction::VAArg:
578     return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
579   case Instruction::Load:
580     return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
581   case Instruction::Store:
582     return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
583   case Instruction::Fence:
584     return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
585   case Instruction::AtomicCmpXchg:
586     return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
587   case Instruction::AtomicRMW:
588     return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
589   case Instruction::Call:
590   case Instruction::CallBr:
591   case Instruction::Invoke:
592     return getModRefInfo((const CallBase *)I, Loc, AAQIP);
593   case Instruction::CatchPad:
594     return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
595   case Instruction::CatchRet:
596     return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
597   default:
598     assert(!I->mayReadOrWriteMemory() &&
599            "Unhandled memory access instruction!");
600     return ModRefInfo::NoModRef;
601   }
602 }
603 
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock.
///
/// Requires a dominator tree; without one the query conservatively answers
/// ModRef.
///
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         AAQueryInfo &AAQI) {
  if (!DT)
    return ModRefInfo::ModRef;

  // This reasoning is only valid for identified function-local objects
  // (allocas, noalias calls, noalias/byval arguments).
  const Value *Object = getUnderlyingObject(MemLoc.Ptr);
  if (!isIdentifiedFunctionLocal(Object))
    return ModRefInfo::ModRef;

  const auto *Call = dyn_cast<CallBase>(I);
  if (!Call || Call == Object)
    return ModRefInfo::ModRef;

  // If the object may already be captured before this call, any access
  // through the capturing pointer must be assumed possible.
  if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                 /* StoreCaptures */ true, I, DT,
                                 /* include Object */ true))
    return ModRefInfo::ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = ModRefInfo::NoModRef;
  // Set flag only if no May found and all operands processed.
  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() || !Call->doesNotCapture(ArgNo))
      continue;

    AliasResult AR =
        alias(MemoryLocation::getBeforeOrAfter(*CI),
              MemoryLocation::getBeforeOrAfter(Object), AAQI, Call);
    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (AR == AliasResult::NoAlias)
      continue;
    if (Call->doesNotAccessMemory(ArgNo))
      continue;
    if (Call->onlyReadsMemory(ArgNo)) {
      R = ModRefInfo::Ref;
      continue;
    }
    // A writable aliasing argument: nothing better than ModRef is possible.
    return ModRefInfo::ModRef;
  }
  return R;
}
661 
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
/// Implemented as a range query over every instruction in the block.
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
}
669 
/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod\ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E;  // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    // Stop at the first instruction whose mod/ref info intersects Mode.
    if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
      return true;
  return false;
}
690 
// Provide a definition for the root virtual destructor of the type-erasure
// concept class used by AAResults.
AAResults::Concept::~Concept() = default;

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;
696 
// Default-construct without a callback; such a pass contributes nothing in
// AAResultsWrapperPass::runOnFunction.
ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

// Construct with a callback that injects external AA results when the
// aggregation is rebuilt.
ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB)
    : ImmutablePass(ID), CB(std::move(CB)) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char ExternalAAWrapperPass::ID = 0;

INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

// Factory used by clients to register an external AA provider with the
// legacy pass manager.
ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
715 
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

// Declare the AA wrapper passes this pass may probe so the legacy pass
// manager knows about the dependencies.
INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)
732 
/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
777 
/// Declare this pass's required and optionally-used analyses to the legacy
/// pass manager.
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<BasicAAWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}
793 
794 AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
795   Result R(AM.getResult<TargetLibraryAnalysis>(F));
796   for (auto &Getter : ResultGetters)
797     (*Getter)(F, AM, R);
798   return R;
799 }
800 
801 bool llvm::isNoAliasCall(const Value *V) {
802   if (const auto *Call = dyn_cast<CallBase>(V))
803     return Call->hasRetAttr(Attribute::NoAlias);
804   return false;
805 }
806 
807 static bool isNoAliasOrByValArgument(const Value *V) {
808   if (const Argument *A = dyn_cast<Argument>(V))
809     return A->hasNoAliasAttr() || A->hasByValAttr();
810   return false;
811 }
812 
813 bool llvm::isIdentifiedObject(const Value *V) {
814   if (isa<AllocaInst>(V))
815     return true;
816   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
817     return true;
818   if (isNoAliasCall(V))
819     return true;
820   if (isNoAliasOrByValArgument(V))
821     return true;
822   return false;
823 }
824 
825 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
826   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
827 }
828 
829 bool llvm::isBaseOfObject(const Value *V) {
830   // TODO: We can handle other cases here
831   // 1) For GC languages, arguments to functions are often required to be
832   //    base pointers.
833   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
834   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
835 }
836 
837 bool llvm::isEscapeSource(const Value *V) {
838   if (auto *CB = dyn_cast<CallBase>(V))
839     return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB,
840                                                                         true);
841 
842   // The load case works because isNonEscapingLocalObject considers all
843   // stores to be escapes (it passes true for the StoreCaptures argument
844   // to PointerMayBeCaptured).
845   if (isa<LoadInst>(V))
846     return true;
847 
848   // The inttoptr case works because isNonEscapingLocalObject considers all
849   // means of converting or equating a pointer to an int (ptrtoint, ptr store
850   // which could be followed by an integer load, ptr<->int compare) as
851   // escaping, and objects located at well-known addresses via platform-specific
852   // means cannot be considered non-escaping local objects.
853   if (isa<IntToPtrInst>(V))
854     return true;
855 
856   // Same for inttoptr constant expressions.
857   if (auto *CE = dyn_cast<ConstantExpr>(V))
858     if (CE->getOpcode() == Instruction::IntToPtr)
859       return true;
860 
861   return false;
862 }
863 
864 bool llvm::isNotVisibleOnUnwind(const Value *Object,
865                                 bool &RequiresNoCaptureBeforeUnwind) {
866   RequiresNoCaptureBeforeUnwind = false;
867 
868   // Alloca goes out of scope on unwind.
869   if (isa<AllocaInst>(Object))
870     return true;
871 
872   // Byval goes out of scope on unwind.
873   if (auto *A = dyn_cast<Argument>(Object))
874     return A->hasByValAttr() || A->hasAttribute(Attribute::DeadOnUnwind);
875 
876   // A noalias return is not accessible from any other code. If the pointer
877   // does not escape prior to the unwind, then the caller cannot access the
878   // memory either.
879   if (isNoAliasCall(Object)) {
880     RequiresNoCaptureBeforeUnwind = true;
881     return true;
882   }
883 
884   return false;
885 }
886 
887 // We don't consider globals as writable: While the physical memory is writable,
888 // we may not have provenance to perform the write.
889 bool llvm::isWritableObject(const Value *Object,
890                             bool &ExplicitlyDereferenceableOnly) {
891   ExplicitlyDereferenceableOnly = false;
892 
893   // TODO: Alloca might not be writable after its lifetime ends.
894   // See https://github.com/llvm/llvm-project/issues/51838.
895   if (isa<AllocaInst>(Object))
896     return true;
897 
898   if (auto *A = dyn_cast<Argument>(Object)) {
899     // Also require noalias, otherwise writability at function entry cannot be
900     // generalized to writability at other program points, even if the pointer
901     // does not escape.
902     if (A->hasAttribute(Attribute::Writable) && A->hasNoAliasAttr()) {
903       ExplicitlyDereferenceableOnly = true;
904       return true;
905     }
906 
907     return A->hasByValAttr();
908   }
909 
910   // TODO: Noalias shouldn't imply writability, this should check for an
911   // allocator function instead.
912   return isNoAliasCall(Object);
913 }
914