xref: /llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp (revision d4b6fcb32e29d0cd834a3c89205fef48fbfc1d2d)
1 //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the generic AliasAnalysis interface which is used as the
10 // common interface used by all clients and implementations of alias analysis.
11 //
12 // This file also implements the default version of the AliasAnalysis interface
13 // that is to be used when no other implementation is specified.  This does some
14 // simple tests that detect obvious cases: two different global pointers cannot
15 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
16 // etc.
17 //
18 // This alias analysis implementation really isn't very good for anything, but
19 // it is very fast, and makes a nice clean default implementation.  Because it
20 // handles lots of little corner cases, other, more complex, alias analysis
21 // implementations may choose to rely on this pass to resolve these simple and
22 // easy cases.
23 //
24 //===----------------------------------------------------------------------===//
25 
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CaptureTracking.h"
30 #include "llvm/Analysis/GlobalsModRef.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
33 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
34 #include "llvm/Analysis/ScopedNoAliasAA.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/Attributes.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Value.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/AtomicOrdering.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <functional>
53 #include <iterator>
54 
55 #define DEBUG_TYPE "aa"
56 
57 using namespace llvm;
58 
// Counters for top-level (depth-zero) alias query outcomes; see the depth
// check at the end of AAResults::alias, which only counts outermost queries.
STATISTIC(NumNoAlias,   "Number of NoAlias results");
STATISTIC(NumMayAlias,  "Number of MayAlias results");
STATISTIC(NumMustAlias, "Number of MustAlias results");
62 
namespace llvm {
/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));
} // namespace llvm

#ifndef NDEBUG
/// Print a trace of alias analysis queries and their results.
static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
#else
// In release builds the flag degrades to a compile-time false so every
// `if (EnableAATrace)` block is dead-stripped.
static const bool EnableAATrace = false;
#endif
75 
// Move construction steals the aggregated AA result list and the dependency
// keys; TLI is carried over as-is.
AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}

// NOTE(review): defined out of line (not defaulted in the header) —
// presumably so the owned AA model types are complete here; confirm against
// AliasAnalysis.h before changing.
AAResults::~AAResults() {}
80 
81 bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
82                            FunctionAnalysisManager::Invalidator &Inv) {
83   // AAResults preserves the AAManager by default, due to the stateless nature
84   // of AliasAnalysis. There is no need to check whether it has been preserved
85   // explicitly. Check if any module dependency was invalidated and caused the
86   // AAManager to be invalidated. Invalidate ourselves in that case.
87   auto PAC = PA.getChecker<AAManager>();
88   if (!PAC.preservedWhenStateless())
89     return true;
90 
91   // Check if any of the function dependencies were invalidated, and invalidate
92   // ourselves in that case.
93   for (AnalysisKey *ID : AADeps)
94     if (Inv.invalidate(ID, F, PA))
95       return true;
96 
97   // Everything we depend on is still fine, so are we. Nothing to invalidate.
98   return false;
99 }
100 
101 //===----------------------------------------------------------------------===//
102 // Default chaining methods
103 //===----------------------------------------------------------------------===//
104 
105 AliasResult AAResults::alias(const MemoryLocation &LocA,
106                              const MemoryLocation &LocB) {
107   SimpleAAQueryInfo AAQIP(*this);
108   return alias(LocA, LocB, AAQIP);
109 }
110 
111 AliasResult AAResults::alias(const MemoryLocation &LocA,
112                              const MemoryLocation &LocB, AAQueryInfo &AAQI) {
113   AliasResult Result = AliasResult::MayAlias;
114 
115   if (EnableAATrace) {
116     for (unsigned I = 0; I < AAQI.Depth; ++I)
117       dbgs() << "  ";
118     dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
119            << *LocB.Ptr << " @ " << LocB.Size << "\n";
120   }
121 
122   AAQI.Depth++;
123   for (const auto &AA : AAs) {
124     Result = AA->alias(LocA, LocB, AAQI);
125     if (Result != AliasResult::MayAlias)
126       break;
127   }
128   AAQI.Depth--;
129 
130   if (EnableAATrace) {
131     for (unsigned I = 0; I < AAQI.Depth; ++I)
132       dbgs() << "  ";
133     dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
134            << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
135   }
136 
137   if (AAQI.Depth == 0) {
138     if (Result == AliasResult::NoAlias)
139       ++NumNoAlias;
140     else if (Result == AliasResult::MustAlias)
141       ++NumMustAlias;
142     else
143       ++NumMayAlias;
144   }
145   return Result;
146 }
147 
148 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
149                                         bool IgnoreLocals) {
150   SimpleAAQueryInfo AAQIP(*this);
151   return getModRefInfoMask(Loc, AAQIP, IgnoreLocals);
152 }
153 
154 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
155                                         AAQueryInfo &AAQI, bool IgnoreLocals) {
156   ModRefInfo Result = ModRefInfo::ModRef;
157 
158   for (const auto &AA : AAs) {
159     Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);
160 
161     // Early-exit the moment we reach the bottom of the lattice.
162     if (isNoModRef(Result))
163       return ModRefInfo::NoModRef;
164   }
165 
166   return Result;
167 }
168 
169 ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
170   ModRefInfo Result = ModRefInfo::ModRef;
171 
172   for (const auto &AA : AAs) {
173     Result &= AA->getArgModRefInfo(Call, ArgIdx);
174 
175     // Early-exit the moment we reach the bottom of the lattice.
176     if (isNoModRef(Result))
177       return ModRefInfo::NoModRef;
178   }
179 
180   return Result;
181 }
182 
183 ModRefInfo AAResults::getModRefInfo(const Instruction *I,
184                                     const CallBase *Call2) {
185   SimpleAAQueryInfo AAQIP(*this);
186   return getModRefInfo(I, Call2, AAQIP);
187 }
188 
189 ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
190                                     AAQueryInfo &AAQI) {
191   // We may have two calls.
192   if (const auto *Call1 = dyn_cast<CallBase>(I)) {
193     // Check if the two calls modify the same memory.
194     return getModRefInfo(Call1, Call2, AAQI);
195   }
196   // If this is a fence, just return ModRef.
197   if (I->isFenceLike())
198     return ModRefInfo::ModRef;
199   // Otherwise, check if the call modifies or references the
200   // location this memory access defines.  The best we can say
201   // is that if the call references what this instruction
202   // defines, it must be clobbered by this location.
203   const MemoryLocation DefLoc = MemoryLocation::get(I);
204   ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
205   if (isModOrRefSet(MR))
206     return ModRefInfo::ModRef;
207   return ModRefInfo::NoModRef;
208 }
209 
/// Mod/ref behavior of \p Call with respect to \p Loc: first intersect the
/// answers of all registered AA implementations, then refine that using the
/// call's aggregate memory effects and per-argument mod/ref info.
ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Start at the top of the lattice and intersect downward.
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call, Loc, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = getMemoryEffects(Call, AAQI)
                .getWithoutLoc(MemoryEffects::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  ModRefInfo ArgMR = ME.getModRef(MemoryEffects::ArgMem);
  ModRefInfo OtherMR = ME.getWithoutLoc(MemoryEffects::ArgMem).getModRef();
  if ((ArgMR | OtherMR) != OtherMR) {
    // Refine the modref info for argument memory. We only bother to do this
    // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
    // on the final result.
    ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
    for (const auto &I : llvm::enumerate(Call->args())) {
      const Value *Arg = I.value();
      // Non-pointer arguments cannot name memory.
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = I.index();
      MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI);
      AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI);
      // Only arguments that may overlap Loc contribute to the mask.
      if (ArgAlias != AliasResult::NoAlias)
        AllArgsMask |= getArgModRefInfo(Call, ArgIdx);
    }
    ArgMR &= AllArgsMask;
  }

  Result &= ArgMR | OtherMR;

  // Apply the ModRef mask. This ensures that if Loc is a constant memory
  // location, we take into account the fact that the call definitely could not
  // modify the memory location.
  if (!isNoModRef(Result))
    Result &= getModRefInfoMask(Loc);

  return Result;
}
263 
/// Mod/ref behavior of \p Call1 with respect to the memory accessed by
/// \p Call2: intersect the registered AAs' answers, then refine via the two
/// calls' aggregate memory effects and, for argpointee-only calls, their
/// per-argument mod/ref info.
ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
                                    const CallBase *Call2, AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call1, Call2, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If Call1 or Call2 are readnone, they don't interact.
  auto Call1B = getMemoryEffects(Call1, AAQI);
  if (Call1B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  auto Call2B = getMemoryEffects(Call2, AAQI);
  if (Call2B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // If they both only read from memory, there is no dependence.
  if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
    return ModRefInfo::NoModRef;

  // If Call1 only reads memory, the only dependence on Call2 can be
  // from Call1 reading memory written by Call2.
  if (Call1B.onlyReadsMemory())
    Result &= ModRefInfo::Ref;
  else if (Call1B.onlyWritesMemory())
    Result &= ModRefInfo::Mod;

  // If Call2 only access memory through arguments, accumulate the mod/ref
  // information from Call1's references to the memory referenced by
  // Call2's arguments.
  if (Call2B.onlyAccessesArgPointees()) {
    if (!Call2B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      // Non-pointer arguments cannot name memory.
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
      auto Call2ArgLoc =
          MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);

      // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
      // dependence of Call1 on that location is the inverse:
      // - If Call2 modifies location, dependence exists if Call1 reads or
      //   writes.
      // - If Call2 only reads location, dependence exists if Call1 writes.
      ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
      ModRefInfo ArgMask = ModRefInfo::NoModRef;
      if (isModSet(ArgModRefC2))
        ArgMask = ModRefInfo::ModRef;
      else if (isRefSet(ArgModRefC2))
        ArgMask = ModRefInfo::Mod;

      // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
      // above ArgMask to update dependence info.
      ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);

      // Stop early once the accumulated answer can no longer improve on
      // Result.
      R = (R | ArgMask) & Result;
      if (R == Result)
        break;
    }

    return R;
  }

  // If Call1 only accesses memory through arguments, check if Call2 references
  // any of the memory referenced by Call1's arguments. If not, return NoModRef.
  if (Call1B.onlyAccessesArgPointees()) {
    if (!Call1B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      // Non-pointer arguments cannot name memory.
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
      auto Call1ArgLoc =
          MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);

      // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
      // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
      // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
      ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
      ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI);
      if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
          (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
        R = (R | ArgModRefC1) & Result;

      if (R == Result)
        break;
    }

    return R;
  }

  return Result;
}
370 
371 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
372                                           AAQueryInfo &AAQI) {
373   MemoryEffects Result = MemoryEffects::unknown();
374 
375   for (const auto &AA : AAs) {
376     Result &= AA->getMemoryEffects(Call, AAQI);
377 
378     // Early-exit the moment we reach the bottom of the lattice.
379     if (Result.doesNotAccessMemory())
380       return Result;
381   }
382 
383   return Result;
384 }
385 
386 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
387   SimpleAAQueryInfo AAQI(*this);
388   return getMemoryEffects(Call, AAQI);
389 }
390 
391 MemoryEffects AAResults::getMemoryEffects(const Function *F) {
392   MemoryEffects Result = MemoryEffects::unknown();
393 
394   for (const auto &AA : AAs) {
395     Result &= AA->getMemoryEffects(F);
396 
397     // Early-exit the moment we reach the bottom of the lattice.
398     if (Result.doesNotAccessMemory())
399       return Result;
400   }
401 
402   return Result;
403 }
404 
405 raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
406   switch (AR) {
407   case AliasResult::NoAlias:
408     OS << "NoAlias";
409     break;
410   case AliasResult::MustAlias:
411     OS << "MustAlias";
412     break;
413   case AliasResult::MayAlias:
414     OS << "MayAlias";
415     break;
416   case AliasResult::PartialAlias:
417     OS << "PartialAlias";
418     if (AR.hasOffset())
419       OS << " (off " << AR.getOffset() << ")";
420     break;
421   }
422   return OS;
423 }
424 
425 raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) {
426   switch (MR) {
427   case ModRefInfo::NoModRef:
428     OS << "NoModRef";
429     break;
430   case ModRefInfo::Ref:
431     OS << "Ref";
432     break;
433   case ModRefInfo::Mod:
434     OS << "Mod";
435     break;
436   case ModRefInfo::ModRef:
437     OS << "ModRef";
438     break;
439   }
440   return OS;
441 }
442 
443 raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) {
444   for (MemoryEffects::Location Loc : MemoryEffects::locations()) {
445     switch (Loc) {
446     case MemoryEffects::ArgMem:
447       OS << "ArgMem: ";
448       break;
449     case MemoryEffects::InaccessibleMem:
450       OS << "InaccessibleMem: ";
451       break;
452     case MemoryEffects::Other:
453       OS << "Other: ";
454       break;
455     }
456     OS << ME.getModRef(Loc) << ", ";
457   }
458   return OS;
459 }
460 
461 //===----------------------------------------------------------------------===//
462 // Helper method implementation
463 //===----------------------------------------------------------------------===//
464 
465 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
466                                     const MemoryLocation &Loc,
467                                     AAQueryInfo &AAQI) {
468   // Be conservative in the face of atomic.
469   if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
470     return ModRefInfo::ModRef;
471 
472   // If the load address doesn't alias the given address, it doesn't read
473   // or write the specified memory.
474   if (Loc.Ptr) {
475     AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI);
476     if (AR == AliasResult::NoAlias)
477       return ModRefInfo::NoModRef;
478   }
479   // Otherwise, a load just reads.
480   return ModRefInfo::Ref;
481 }
482 
483 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
484                                     const MemoryLocation &Loc,
485                                     AAQueryInfo &AAQI) {
486   // Be conservative in the face of atomic.
487   if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
488     return ModRefInfo::ModRef;
489 
490   if (Loc.Ptr) {
491     AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI);
492     // If the store address cannot alias the pointer in question, then the
493     // specified memory cannot be modified by the store.
494     if (AR == AliasResult::NoAlias)
495       return ModRefInfo::NoModRef;
496 
497     // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
498     // This ensures that if Loc is a constant memory location, we take into
499     // account the fact that the store definitely could not modify the memory
500     // location.
501     if (!isModSet(getModRefInfoMask(Loc)))
502       return ModRefInfo::NoModRef;
503   }
504 
505   // Otherwise, a store just writes.
506   return ModRefInfo::Mod;
507 }
508 
509 ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
510                                     const MemoryLocation &Loc,
511                                     AAQueryInfo &AAQI) {
512   // All we know about a fence instruction is what we get from the ModRef
513   // mask: if Loc is a constant memory location, the fence definitely could
514   // not modify it.
515   if (Loc.Ptr)
516     return getModRefInfoMask(Loc);
517   return ModRefInfo::ModRef;
518 }
519 
520 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
521                                     const MemoryLocation &Loc,
522                                     AAQueryInfo &AAQI) {
523   if (Loc.Ptr) {
524     AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI);
525     // If the va_arg address cannot alias the pointer in question, then the
526     // specified memory cannot be accessed by the va_arg.
527     if (AR == AliasResult::NoAlias)
528       return ModRefInfo::NoModRef;
529 
530     // If the pointer is a pointer to invariant memory, then it could not have
531     // been modified by this va_arg.
532     return getModRefInfoMask(Loc, AAQI);
533   }
534 
535   // Otherwise, a va_arg reads and writes.
536   return ModRefInfo::ModRef;
537 }
538 
539 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
540                                     const MemoryLocation &Loc,
541                                     AAQueryInfo &AAQI) {
542   if (Loc.Ptr) {
543     // If the pointer is a pointer to invariant memory,
544     // then it could not have been modified by this catchpad.
545     return getModRefInfoMask(Loc, AAQI);
546   }
547 
548   // Otherwise, a catchpad reads and writes.
549   return ModRefInfo::ModRef;
550 }
551 
552 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
553                                     const MemoryLocation &Loc,
554                                     AAQueryInfo &AAQI) {
555   if (Loc.Ptr) {
556     // If the pointer is a pointer to invariant memory,
557     // then it could not have been modified by this catchpad.
558     return getModRefInfoMask(Loc, AAQI);
559   }
560 
561   // Otherwise, a catchret reads and writes.
562   return ModRefInfo::ModRef;
563 }
564 
565 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
566                                     const MemoryLocation &Loc,
567                                     AAQueryInfo &AAQI) {
568   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
569   if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
570     return ModRefInfo::ModRef;
571 
572   if (Loc.Ptr) {
573     AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI);
574     // If the cmpxchg address does not alias the location, it does not access
575     // it.
576     if (AR == AliasResult::NoAlias)
577       return ModRefInfo::NoModRef;
578   }
579 
580   return ModRefInfo::ModRef;
581 }
582 
583 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
584                                     const MemoryLocation &Loc,
585                                     AAQueryInfo &AAQI) {
586   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
587   if (isStrongerThanMonotonic(RMW->getOrdering()))
588     return ModRefInfo::ModRef;
589 
590   if (Loc.Ptr) {
591     AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI);
592     // If the atomicrmw address does not alias the location, it does not access
593     // it.
594     if (AR == AliasResult::NoAlias)
595       return ModRefInfo::NoModRef;
596   }
597 
598   return ModRefInfo::ModRef;
599 }
600 
601 ModRefInfo AAResults::getModRefInfo(const Instruction *I,
602                                     const std::optional<MemoryLocation> &OptLoc,
603                                     AAQueryInfo &AAQIP) {
604   if (OptLoc == std::nullopt) {
605     if (const auto *Call = dyn_cast<CallBase>(I))
606       return getMemoryEffects(Call, AAQIP).getModRef();
607   }
608 
609   const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation());
610 
611   switch (I->getOpcode()) {
612   case Instruction::VAArg:
613     return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
614   case Instruction::Load:
615     return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
616   case Instruction::Store:
617     return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
618   case Instruction::Fence:
619     return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
620   case Instruction::AtomicCmpXchg:
621     return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
622   case Instruction::AtomicRMW:
623     return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
624   case Instruction::Call:
625   case Instruction::CallBr:
626   case Instruction::Invoke:
627     return getModRefInfo((const CallBase *)I, Loc, AAQIP);
628   case Instruction::CatchPad:
629     return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
630   case Instruction::CatchRet:
631     return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
632   default:
633     assert(!I->mayReadOrWriteMemory() &&
634            "Unhandled memory access instruction!");
635     return ModRefInfo::NoModRef;
636   }
637 }
638 
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         AAQueryInfo &AAQI) {
  // Without a dominator tree we cannot reason about "before", so stay
  // conservative.
  if (!DT)
    return ModRefInfo::ModRef;

  // The refinement below only applies to identified function-local objects
  // (e.g. allocas) whose escapes we can enumerate.
  const Value *Object = getUnderlyingObject(MemLoc.Ptr);
  if (!isIdentifiedFunctionLocal(Object))
    return ModRefInfo::ModRef;

  const auto *Call = dyn_cast<CallBase>(I);
  if (!Call || Call == Object)
    return ModRefInfo::ModRef;

  // If the object may already have been captured before I, the call could
  // reach it through some escaped copy of the pointer.
  if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                 /* StoreCaptures */ true, I, DT,
                                 /* include Object */ true))
    return ModRefInfo::ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = ModRefInfo::NoModRef;
  // Set flag only if no May found and all operands processed.
  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!Call->doesNotCapture(ArgNo) && ArgNo < Call->arg_size() &&
         !Call->isByValArgument(ArgNo)))
      continue;

    AliasResult AR = alias(
        MemoryLocation::getBeforeOrAfter(*CI),
        MemoryLocation::getBeforeOrAfter(Object), AAQI);
    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (AR == AliasResult::NoAlias)
      continue;
    if (Call->doesNotAccessMemory(ArgNo))
      continue;
    if (Call->onlyReadsMemory(ArgNo)) {
      R = ModRefInfo::Ref;
      continue;
    }
    // The argument may be written through: give up and report ModRef.
    return ModRefInfo::ModRef;
  }
  return R;
}
698 
699 /// canBasicBlockModify - Return true if it is possible for execution of the
700 /// specified basic block to modify the location Loc.
701 ///
702 bool AAResults::canBasicBlockModify(const BasicBlock &BB,
703                                     const MemoryLocation &Loc) {
704   return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
705 }
706 
707 /// canInstructionRangeModRef - Return true if it is possible for the
708 /// execution of the specified instructions to mod\ref (according to the
709 /// mode) the location Loc. The instructions to consider are all
710 /// of the instructions in the range of [I1,I2] INCLUSIVE.
711 /// I1 and I2 must be in the same basic block.
712 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
713                                           const Instruction &I2,
714                                           const MemoryLocation &Loc,
715                                           const ModRefInfo Mode) {
716   assert(I1.getParent() == I2.getParent() &&
717          "Instructions not in same basic block!");
718   BasicBlock::const_iterator I = I1.getIterator();
719   BasicBlock::const_iterator E = I2.getIterator();
720   ++E;  // Convert from inclusive to exclusive range.
721 
722   for (; I != E; ++I) // Check every instruction in range
723     if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
724       return true;
725   return false;
726 }
727 
// Anchor the AA concept hierarchy by defining its root virtual destructor out
// of line.
AAResults::Concept::~Concept() = default;

// Provide a definition for the static key object the new pass manager uses to
// identify the AAManager analysis.
AnalysisKey AAManager::Key;
733 
// Default-construct with no callback; register with the legacy PassRegistry.
ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}
737 
// Construct with a callback that is later run over each function's AAResults
// (see AAResultsWrapperPass::runOnFunction), letting external clients inject
// their own alias analysis.
ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB)
    : ImmutablePass(ID), CB(std::move(CB)) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}
742 
char ExternalAAWrapperPass::ID = 0;

// Register the pass with the legacy pass manager under "external-aa".
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)
747 
// Factory used by clients to register an external AA callback with the
// legacy pass manager; ownership of the callback moves into the pass.
ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
752 
// Register with the legacy PassRegistry on construction.
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}
756 
char AAResultsWrapperPass::ID = 0;

// Register the legacy "aa" pass, declaring the wrapper passes runOnFunction
// may query so the legacy pass manager knows about these dependency edges.
INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)
769 
// Factory for the legacy AAResults aggregation wrapper pass.
FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}
773 
/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well. Running it last lets the callback observe the full aggregation.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
818 
/// Declare the analyses runOnFunction will probe. TLI and BasicAA are hard
/// requirements; the rest are optional ("used if available") so that the set
/// actually aggregated is controlled by which AA passes were scheduled.
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<BasicAAWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}
834 
835 AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
836   Result R(AM.getResult<TargetLibraryAnalysis>(F));
837   for (auto &Getter : ResultGetters)
838     (*Getter)(F, AM, R);
839   return R;
840 }
841 
/// Build a one-off AAResults aggregation for a legacy-PM pass \p P over
/// function \p F, seeded with an explicitly constructed BasicAA result
/// \p BAR. Mirrors AAResultsWrapperPass::runOnFunction, and must be kept in
/// sync with llvm::getAAResultsAnalysisUsage (which declares the optional
/// passes probed here). Note: SCEVAA is deliberately absent from this list.
AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F));

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  // External AAs hook in via a callback rather than a fixed result type.
  if (auto *WrapperPass = P.getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(P, F, AAR);

  return AAR;
}
864 
865 bool llvm::isNoAliasCall(const Value *V) {
866   if (const auto *Call = dyn_cast<CallBase>(V))
867     return Call->hasRetAttr(Attribute::NoAlias);
868   return false;
869 }
870 
871 static bool isNoAliasOrByValArgument(const Value *V) {
872   if (const Argument *A = dyn_cast<Argument>(V))
873     return A->hasNoAliasAttr() || A->hasByValAttr();
874   return false;
875 }
876 
877 bool llvm::isIdentifiedObject(const Value *V) {
878   if (isa<AllocaInst>(V))
879     return true;
880   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
881     return true;
882   if (isNoAliasCall(V))
883     return true;
884   if (isNoAliasOrByValArgument(V))
885     return true;
886   return false;
887 }
888 
889 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
890   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
891 }
892 
893 bool llvm::isEscapeSource(const Value *V) {
894   if (auto *CB = dyn_cast<CallBase>(V))
895     return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB,
896                                                                         true);
897 
898   // The load case works because isNonEscapingLocalObject considers all
899   // stores to be escapes (it passes true for the StoreCaptures argument
900   // to PointerMayBeCaptured).
901   if (isa<LoadInst>(V))
902     return true;
903 
904   // The inttoptr case works because isNonEscapingLocalObject considers all
905   // means of converting or equating a pointer to an int (ptrtoint, ptr store
906   // which could be followed by an integer load, ptr<->int compare) as
907   // escaping, and objects located at well-known addresses via platform-specific
908   // means cannot be considered non-escaping local objects.
909   if (isa<IntToPtrInst>(V))
910     return true;
911 
912   return false;
913 }
914 
915 bool llvm::isNotVisibleOnUnwind(const Value *Object,
916                                 bool &RequiresNoCaptureBeforeUnwind) {
917   RequiresNoCaptureBeforeUnwind = false;
918 
919   // Alloca goes out of scope on unwind.
920   if (isa<AllocaInst>(Object))
921     return true;
922 
923   // Byval goes out of scope on unwind.
924   if (auto *A = dyn_cast<Argument>(Object))
925     return A->hasByValAttr();
926 
927   // A noalias return is not accessible from any other code. If the pointer
928   // does not escape prior to the unwind, then the caller cannot access the
929   // memory either.
930   if (isNoAliasCall(Object)) {
931     RequiresNoCaptureBeforeUnwind = true;
932     return true;
933   }
934 
935   return false;
936 }
937 
/// Helper for legacy-PM passes that call llvm::createLegacyPMAAResults:
/// declares the required TLI plus the optional AA wrapper passes that
/// createLegacyPMAAResults probes, so the pass manager keeps them alive.
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}
948