1 //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the generic AliasAnalysis interface, which is the
10 // common interface used by all clients and implementations of alias analysis.
11 //
12 // This file also implements the default version of the AliasAnalysis interface
13 // that is to be used when no other implementation is specified. This does some
14 // simple tests that detect obvious cases: two different global pointers cannot
15 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
16 // etc.
17 //
18 // This alias analysis implementation really isn't very good for anything, but
19 // it is very fast, and makes a nice clean default implementation. Because it
20 // handles lots of little corner cases, other, more complex, alias analysis
21 // implementations may choose to rely on this pass to resolve these simple and
22 // easy cases.
23 //
24 //===----------------------------------------------------------------------===//
25
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CFLAndersAliasAnalysis.h"
30 #include "llvm/Analysis/CFLSteensAliasAnalysis.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/GlobalsModRef.h"
33 #include "llvm/Analysis/MemoryLocation.h"
34 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
35 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
36 #include "llvm/Analysis/ScopedNoAliasAA.h"
37 #include "llvm/Analysis/TargetLibraryInfo.h"
38 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
39 #include "llvm/Analysis/ValueTracking.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/BasicBlock.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/Type.h"
46 #include "llvm/IR/Value.h"
47 #include "llvm/InitializePasses.h"
48 #include "llvm/Pass.h"
49 #include "llvm/Support/AtomicOrdering.h"
50 #include "llvm/Support/Casting.h"
51 #include "llvm/Support/CommandLine.h"
52 #include <algorithm>
53 #include <cassert>
54 #include <functional>
55 #include <iterator>
56
57 #define DEBUG_TYPE "aa"
58
59 using namespace llvm;
60
61 STATISTIC(NumNoAlias, "Number of NoAlias results");
62 STATISTIC(NumMayAlias, "Number of MayAlias results");
63 STATISTIC(NumMustAlias, "Number of MustAlias results");
64
65 namespace llvm {
66 /// Allow disabling BasicAA from the AA results. This is particularly useful
67 /// when testing to isolate a single AA implementation.
68 cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));
69 } // namespace llvm
70
71 #ifndef NDEBUG
72 /// Print a trace of alias analysis queries and their results.
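/// For example, in an assertion-enabled build an invocation such as
/// `opt -passes=aa-eval -aa-trace -disable-output input.ll` prints each alias
/// query and its result, indented by nesting depth; any pass that issues AA
/// queries can stand in for aa-eval.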
73 static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false)); 74 #else 75 static const bool EnableAATrace = false; 76 #endif 77 78 AAResults::AAResults(AAResults &&Arg) 79 : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {} 80 81 AAResults::~AAResults() {} 82 83 bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA, 84 FunctionAnalysisManager::Invalidator &Inv) { 85 // AAResults preserves the AAManager by default, due to the stateless nature 86 // of AliasAnalysis. There is no need to check whether it has been preserved 87 // explicitly. Check if any module dependency was invalidated and caused the 88 // AAManager to be invalidated. Invalidate ourselves in that case. 89 auto PAC = PA.getChecker<AAManager>(); 90 if (!PAC.preservedWhenStateless()) 91 return true; 92 93 // Check if any of the function dependencies were invalidated, and invalidate 94 // ourselves in that case. 95 for (AnalysisKey *ID : AADeps) 96 if (Inv.invalidate(ID, F, PA)) 97 return true; 98 99 // Everything we depend on is still fine, so are we. Nothing to invalidate. 100 return false; 101 } 102 103 //===----------------------------------------------------------------------===// 104 // Default chaining methods 105 //===----------------------------------------------------------------------===// 106 107 AliasResult AAResults::alias(const MemoryLocation &LocA, 108 const MemoryLocation &LocB) { 109 SimpleAAQueryInfo AAQIP(*this); 110 return alias(LocA, LocB, AAQIP); 111 } 112 113 AliasResult AAResults::alias(const MemoryLocation &LocA, 114 const MemoryLocation &LocB, AAQueryInfo &AAQI) { 115 AliasResult Result = AliasResult::MayAlias; 116 117 if (EnableAATrace) { 118 for (unsigned I = 0; I < AAQI.Depth; ++I) 119 dbgs() << " "; 120 dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", " 121 << *LocB.Ptr << " @ " << LocB.Size << "\n"; 122 } 123 124 AAQI.Depth++; 125 for (const auto &AA : AAs) { 126 Result = AA->alias(LocA, LocB, AAQI); 127 if (Result != AliasResult::MayAlias) 128 break; 129 } 130 AAQI.Depth--; 131 132 if (EnableAATrace) { 133 for (unsigned I = 0; I < AAQI.Depth; ++I) 134 dbgs() << " "; 135 dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", " 136 << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n"; 137 } 138 139 if (AAQI.Depth == 0) { 140 if (Result == AliasResult::NoAlias) 141 ++NumNoAlias; 142 else if (Result == AliasResult::MustAlias) 143 ++NumMustAlias; 144 else 145 ++NumMayAlias; 146 } 147 return Result; 148 } 149 150 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc, 151 bool IgnoreLocals) { 152 SimpleAAQueryInfo AAQIP(*this); 153 return getModRefInfoMask(Loc, AAQIP, IgnoreLocals); 154 } 155 156 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc, 157 AAQueryInfo &AAQI, bool IgnoreLocals) { 158 ModRefInfo Result = ModRefInfo::ModRef; 159 160 for (const auto &AA : AAs) { 161 Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals); 162 163 // Early-exit the moment we reach the bottom of the lattice. 164 if (isNoModRef(Result)) 165 return ModRefInfo::NoModRef; 166 } 167 168 return Result; 169 } 170 171 ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { 172 ModRefInfo Result = ModRefInfo::ModRef; 173 174 for (const auto &AA : AAs) { 175 Result &= AA->getArgModRefInfo(Call, ArgIdx); 176 177 // Early-exit the moment we reach the bottom of the lattice. 
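// (NoModRef is the bottom of the lattice: Result is only ever narrowed with
// &=, so once it reaches NoModRef no later AA in the chain can change it.)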
178 if (isNoModRef(Result)) 179 return ModRefInfo::NoModRef; 180 } 181 182 return Result; 183 } 184 185 ModRefInfo AAResults::getModRefInfo(const Instruction *I, 186 const CallBase *Call2) { 187 SimpleAAQueryInfo AAQIP(*this); 188 return getModRefInfo(I, Call2, AAQIP); 189 } 190 191 ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2, 192 AAQueryInfo &AAQI) { 193 // We may have two calls. 194 if (const auto *Call1 = dyn_cast<CallBase>(I)) { 195 // Check if the two calls modify the same memory. 196 return getModRefInfo(Call1, Call2, AAQI); 197 } 198 // If this is a fence, just return ModRef. 199 if (I->isFenceLike()) 200 return ModRefInfo::ModRef; 201 // Otherwise, check if the call modifies or references the 202 // location this memory access defines. The best we can say 203 // is that if the call references what this instruction 204 // defines, it must be clobbered by this location. 205 const MemoryLocation DefLoc = MemoryLocation::get(I); 206 ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI); 207 if (isModOrRefSet(MR)) 208 return ModRefInfo::ModRef; 209 return ModRefInfo::NoModRef; 210 } 211 212 ModRefInfo AAResults::getModRefInfo(const CallBase *Call, 213 const MemoryLocation &Loc, 214 AAQueryInfo &AAQI) { 215 ModRefInfo Result = ModRefInfo::ModRef; 216 217 for (const auto &AA : AAs) { 218 Result &= AA->getModRefInfo(Call, Loc, AAQI); 219 220 // Early-exit the moment we reach the bottom of the lattice. 221 if (isNoModRef(Result)) 222 return ModRefInfo::NoModRef; 223 } 224 225 // Try to refine the mod-ref info further using other API entry points to the 226 // aggregate set of AA results. 227 228 // We can completely ignore inaccessible memory here, because MemoryLocations 229 // can only reference accessible memory. 230 auto ME = getMemoryEffects(Call, AAQI) 231 .getWithoutLoc(MemoryEffects::InaccessibleMem); 232 if (ME.doesNotAccessMemory()) 233 return ModRefInfo::NoModRef; 234 235 ModRefInfo ArgMR = ME.getModRef(MemoryEffects::ArgMem); 236 ModRefInfo OtherMR = ME.getWithoutLoc(MemoryEffects::ArgMem).getModRef(); 237 if ((ArgMR | OtherMR) != OtherMR) { 238 // Refine the modref info for argument memory. We only bother to do this 239 // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact 240 // on the final result. 241 ModRefInfo AllArgsMask = ModRefInfo::NoModRef; 242 for (const auto &I : llvm::enumerate(Call->args())) { 243 const Value *Arg = I.value(); 244 if (!Arg->getType()->isPointerTy()) 245 continue; 246 unsigned ArgIdx = I.index(); 247 MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI); 248 AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI); 249 if (ArgAlias != AliasResult::NoAlias) 250 AllArgsMask |= getArgModRefInfo(Call, ArgIdx); 251 } 252 ArgMR &= AllArgsMask; 253 } 254 255 Result &= ArgMR | OtherMR; 256 257 // Apply the ModRef mask. This ensures that if Loc is a constant memory 258 // location, we take into account the fact that the call definitely could not 259 // modify the memory location. 260 if (!isNoModRef(Result)) 261 Result &= getModRefInfoMask(Loc); 262 263 return Result; 264 } 265 266 ModRefInfo AAResults::getModRefInfo(const CallBase *Call1, 267 const CallBase *Call2, AAQueryInfo &AAQI) { 268 ModRefInfo Result = ModRefInfo::ModRef; 269 270 for (const auto &AA : AAs) { 271 Result &= AA->getModRefInfo(Call1, Call2, AAQI); 272 273 // Early-exit the moment we reach the bottom of the lattice. 
274 if (isNoModRef(Result))
275 return ModRefInfo::NoModRef;
276 }
277
278 // Try to refine the mod-ref info further using other API entry points to the
279 // aggregate set of AA results.
280
281 // If either Call1 or Call2 is readnone, they don't interact.
282 auto Call1B = getMemoryEffects(Call1, AAQI);
283 if (Call1B.doesNotAccessMemory())
284 return ModRefInfo::NoModRef;
285
286 auto Call2B = getMemoryEffects(Call2, AAQI);
287 if (Call2B.doesNotAccessMemory())
288 return ModRefInfo::NoModRef;
289
290 // If they both only read from memory, there is no dependence.
291 if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
292 return ModRefInfo::NoModRef;
293
294 // If Call1 only reads memory, the only dependence on Call2 can be
295 // from Call1 reading memory written by Call2.
296 if (Call1B.onlyReadsMemory())
297 Result &= ModRefInfo::Ref;
298 else if (Call1B.onlyWritesMemory())
299 Result &= ModRefInfo::Mod;
300
301 // If Call2 only accesses memory through arguments, accumulate the mod/ref
302 // information from Call1's references to the memory referenced by
303 // Call2's arguments.
304 if (Call2B.onlyAccessesArgPointees()) {
305 if (!Call2B.doesAccessArgPointees())
306 return ModRefInfo::NoModRef;
307 ModRefInfo R = ModRefInfo::NoModRef;
308 for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
309 const Value *Arg = *I;
310 if (!Arg->getType()->isPointerTy())
311 continue;
312 unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
313 auto Call2ArgLoc =
314 MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);
315
316 // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
317 // dependence of Call1 on that location is the inverse:
318 // - If Call2 modifies the location, a dependence exists if Call1 reads or
319 // writes.
320 // - If Call2 only reads the location, a dependence exists if Call1 writes.
321 ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
322 ModRefInfo ArgMask = ModRefInfo::NoModRef;
323 if (isModSet(ArgModRefC2))
324 ArgMask = ModRefInfo::ModRef;
325 else if (isRefSet(ArgModRefC2))
326 ArgMask = ModRefInfo::Mod;
327
328 // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use the
329 // ArgMask above to update the dependence info.
330 ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);
331
332 R = (R | ArgMask) & Result;
333 if (R == Result)
334 break;
335 }
336
337 return R;
338 }
339
340 // If Call1 only accesses memory through arguments, check if Call2 references
341 // any of the memory referenced by Call1's arguments. If not, return NoModRef.
342 if (Call1B.onlyAccessesArgPointees()) {
343 if (!Call1B.doesAccessArgPointees())
344 return ModRefInfo::NoModRef;
345 ModRefInfo R = ModRefInfo::NoModRef;
346 for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
347 const Value *Arg = *I;
348 if (!Arg->getType()->isPointerTy())
349 continue;
350 unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
351 auto Call1ArgLoc =
352 MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);
353
354 // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
355 // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
356 // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
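// For example, if Call1 only reads Call1ArgLoc and Call2 also only reads it,
// neither test below fires and nothing is added to R for this argument; a
// dependence is recorded only when at least one of the two calls may write.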
357 ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx); 358 ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI); 359 if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) || 360 (isRefSet(ArgModRefC1) && isModSet(ModRefC2))) 361 R = (R | ArgModRefC1) & Result; 362 363 if (R == Result) 364 break; 365 } 366 367 return R; 368 } 369 370 return Result; 371 } 372 373 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call, 374 AAQueryInfo &AAQI) { 375 MemoryEffects Result = MemoryEffects::unknown(); 376 377 for (const auto &AA : AAs) { 378 Result &= AA->getMemoryEffects(Call, AAQI); 379 380 // Early-exit the moment we reach the bottom of the lattice. 381 if (Result.doesNotAccessMemory()) 382 return Result; 383 } 384 385 return Result; 386 } 387 388 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) { 389 SimpleAAQueryInfo AAQI(*this); 390 return getMemoryEffects(Call, AAQI); 391 } 392 393 MemoryEffects AAResults::getMemoryEffects(const Function *F) { 394 MemoryEffects Result = MemoryEffects::unknown(); 395 396 for (const auto &AA : AAs) { 397 Result &= AA->getMemoryEffects(F); 398 399 // Early-exit the moment we reach the bottom of the lattice. 400 if (Result.doesNotAccessMemory()) 401 return Result; 402 } 403 404 return Result; 405 } 406 407 raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) { 408 switch (AR) { 409 case AliasResult::NoAlias: 410 OS << "NoAlias"; 411 break; 412 case AliasResult::MustAlias: 413 OS << "MustAlias"; 414 break; 415 case AliasResult::MayAlias: 416 OS << "MayAlias"; 417 break; 418 case AliasResult::PartialAlias: 419 OS << "PartialAlias"; 420 if (AR.hasOffset()) 421 OS << " (off " << AR.getOffset() << ")"; 422 break; 423 } 424 return OS; 425 } 426 427 raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) { 428 switch (MR) { 429 case ModRefInfo::NoModRef: 430 OS << "NoModRef"; 431 break; 432 case ModRefInfo::Ref: 433 OS << "Ref"; 434 break; 435 case ModRefInfo::Mod: 436 OS << "Mod"; 437 break; 438 case ModRefInfo::ModRef: 439 OS << "ModRef"; 440 break; 441 } 442 return OS; 443 } 444 445 raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) { 446 for (MemoryEffects::Location Loc : MemoryEffects::locations()) { 447 switch (Loc) { 448 case MemoryEffects::ArgMem: 449 OS << "ArgMem: "; 450 break; 451 case MemoryEffects::InaccessibleMem: 452 OS << "InaccessibleMem: "; 453 break; 454 case MemoryEffects::Other: 455 OS << "Other: "; 456 break; 457 } 458 OS << ME.getModRef(Loc) << ", "; 459 } 460 return OS; 461 } 462 463 //===----------------------------------------------------------------------===// 464 // Helper method implementation 465 //===----------------------------------------------------------------------===// 466 467 ModRefInfo AAResults::getModRefInfo(const LoadInst *L, 468 const MemoryLocation &Loc, 469 AAQueryInfo &AAQI) { 470 // Be conservative in the face of atomic. 471 if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered)) 472 return ModRefInfo::ModRef; 473 474 // If the load address doesn't alias the given address, it doesn't read 475 // or write the specified memory. 476 if (Loc.Ptr) { 477 AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI); 478 if (AR == AliasResult::NoAlias) 479 return ModRefInfo::NoModRef; 480 } 481 // Otherwise, a load just reads. 482 return ModRefInfo::Ref; 483 } 484 485 ModRefInfo AAResults::getModRefInfo(const StoreInst *S, 486 const MemoryLocation &Loc, 487 AAQueryInfo &AAQI) { 488 // Be conservative in the face of atomic. 
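// An ordering stronger than unordered carries memory-ordering semantics that
// the store's address alone does not capture, so conservatively report ModRef
// for any location rather than reasoning about the pointer.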
489 if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
490 return ModRefInfo::ModRef;
491
492 if (Loc.Ptr) {
493 AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI);
494 // If the store address cannot alias the pointer in question, then the
495 // specified memory cannot be modified by the store.
496 if (AR == AliasResult::NoAlias)
497 return ModRefInfo::NoModRef;
498
499 // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
500 // This ensures that if Loc is a constant memory location, we take into
501 // account the fact that the store definitely could not modify the memory
502 // location.
503 if (!isModSet(getModRefInfoMask(Loc)))
504 return ModRefInfo::NoModRef;
505 }
506
507 // Otherwise, a store just writes.
508 return ModRefInfo::Mod;
509 }
510
511 ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
512 const MemoryLocation &Loc,
513 AAQueryInfo &AAQI) {
514 // All we know about a fence instruction is what we get from the ModRef
515 // mask: if Loc is a constant memory location, the fence definitely could
516 // not modify it.
517 if (Loc.Ptr)
518 return getModRefInfoMask(Loc);
519 return ModRefInfo::ModRef;
520 }
521
522 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
523 const MemoryLocation &Loc,
524 AAQueryInfo &AAQI) {
525 if (Loc.Ptr) {
526 AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI);
527 // If the va_arg address cannot alias the pointer in question, then the
528 // specified memory cannot be accessed by the va_arg.
529 if (AR == AliasResult::NoAlias)
530 return ModRefInfo::NoModRef;
531
532 // If the pointer is a pointer to invariant memory, then it could not have
533 // been modified by this va_arg.
534 return getModRefInfoMask(Loc, AAQI);
535 }
536
537 // Otherwise, a va_arg reads and writes.
538 return ModRefInfo::ModRef;
539 }
540
541 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
542 const MemoryLocation &Loc,
543 AAQueryInfo &AAQI) {
544 if (Loc.Ptr) {
545 // If the pointer is a pointer to invariant memory,
546 // then it could not have been modified by this catchpad.
547 return getModRefInfoMask(Loc, AAQI);
548 }
549
550 // Otherwise, a catchpad reads and writes.
551 return ModRefInfo::ModRef;
552 }
553
554 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
555 const MemoryLocation &Loc,
556 AAQueryInfo &AAQI) {
557 if (Loc.Ptr) {
558 // If the pointer is a pointer to invariant memory,
559 // then it could not have been modified by this catchret.
560 return getModRefInfoMask(Loc, AAQI);
561 }
562
563 // Otherwise, a catchret reads and writes.
564 return ModRefInfo::ModRef;
565 }
566
567 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
568 const MemoryLocation &Loc,
569 AAQueryInfo &AAQI) {
570 // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
571 if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
572 return ModRefInfo::ModRef;
573
574 if (Loc.Ptr) {
575 AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI);
576 // If the cmpxchg address does not alias the location, it does not access
577 // it.
578 if (AR == AliasResult::NoAlias)
579 return ModRefInfo::NoModRef;
580 }
581
582 return ModRefInfo::ModRef;
583 }
584
585 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
586 const MemoryLocation &Loc,
587 AAQueryInfo &AAQI) {
588 // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
589 if (isStrongerThanMonotonic(RMW->getOrdering())) 590 return ModRefInfo::ModRef; 591 592 if (Loc.Ptr) { 593 AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI); 594 // If the atomicrmw address does not alias the location, it does not access 595 // it. 596 if (AR == AliasResult::NoAlias) 597 return ModRefInfo::NoModRef; 598 } 599 600 return ModRefInfo::ModRef; 601 } 602 603 ModRefInfo AAResults::getModRefInfo(const Instruction *I, 604 const Optional<MemoryLocation> &OptLoc, 605 AAQueryInfo &AAQIP) { 606 if (OptLoc == None) { 607 if (const auto *Call = dyn_cast<CallBase>(I)) 608 return getMemoryEffects(Call, AAQIP).getModRef(); 609 } 610 611 const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation()); 612 613 switch (I->getOpcode()) { 614 case Instruction::VAArg: 615 return getModRefInfo((const VAArgInst *)I, Loc, AAQIP); 616 case Instruction::Load: 617 return getModRefInfo((const LoadInst *)I, Loc, AAQIP); 618 case Instruction::Store: 619 return getModRefInfo((const StoreInst *)I, Loc, AAQIP); 620 case Instruction::Fence: 621 return getModRefInfo((const FenceInst *)I, Loc, AAQIP); 622 case Instruction::AtomicCmpXchg: 623 return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP); 624 case Instruction::AtomicRMW: 625 return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP); 626 case Instruction::Call: 627 case Instruction::CallBr: 628 case Instruction::Invoke: 629 return getModRefInfo((const CallBase *)I, Loc, AAQIP); 630 case Instruction::CatchPad: 631 return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP); 632 case Instruction::CatchRet: 633 return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP); 634 default: 635 assert(!I->mayReadOrWriteMemory() && 636 "Unhandled memory access instruction!"); 637 return ModRefInfo::NoModRef; 638 } 639 } 640 641 /// Return information about whether a particular call site modifies 642 /// or reads the specified memory location \p MemLoc before instruction \p I 643 /// in a BasicBlock. 644 /// FIXME: this is really just shoring-up a deficiency in alias analysis. 645 /// BasicAA isn't willing to spend linear time determining whether an alloca 646 /// was captured before or after this particular call, while we are. However, 647 /// with a smarter AA in place, this test is just wasting compile time. 648 ModRefInfo AAResults::callCapturesBefore(const Instruction *I, 649 const MemoryLocation &MemLoc, 650 DominatorTree *DT, 651 AAQueryInfo &AAQI) { 652 if (!DT) 653 return ModRefInfo::ModRef; 654 655 const Value *Object = getUnderlyingObject(MemLoc.Ptr); 656 if (!isIdentifiedFunctionLocal(Object)) 657 return ModRefInfo::ModRef; 658 659 const auto *Call = dyn_cast<CallBase>(I); 660 if (!Call || Call == Object) 661 return ModRefInfo::ModRef; 662 663 if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true, 664 /* StoreCaptures */ true, I, DT, 665 /* include Object */ true)) 666 return ModRefInfo::ModRef; 667 668 unsigned ArgNo = 0; 669 ModRefInfo R = ModRefInfo::NoModRef; 670 // Set flag only if no May found and all operands processed. 671 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); 672 CI != CE; ++CI, ++ArgNo) { 673 // Only look at the no-capture or byval pointer arguments. If this 674 // pointer were passed to arguments that were neither of these, then it 675 // couldn't be no-capture. 
676 if (!(*CI)->getType()->isPointerTy() ||
677 (!Call->doesNotCapture(ArgNo) && ArgNo < Call->arg_size() &&
678 !Call->isByValArgument(ArgNo)))
679 continue;
680
681 AliasResult AR = alias(
682 MemoryLocation::getBeforeOrAfter(*CI),
683 MemoryLocation::getBeforeOrAfter(Object), AAQI);
684 // If this is a no-capture pointer argument, see if we can tell that it
685 // is impossible to alias the pointer we're checking. If not, we have to
686 // assume that the call could touch the pointer, even though it doesn't
687 // escape.
688 if (AR == AliasResult::NoAlias)
689 continue;
690 if (Call->doesNotAccessMemory(ArgNo))
691 continue;
692 if (Call->onlyReadsMemory(ArgNo)) {
693 R = ModRefInfo::Ref;
694 continue;
695 }
696 return ModRefInfo::ModRef;
697 }
698 return R;
699 }
700
701 /// canBasicBlockModify - Return true if it is possible for execution of the
702 /// specified basic block to modify the location Loc.
703 ///
704 bool AAResults::canBasicBlockModify(const BasicBlock &BB,
705 const MemoryLocation &Loc) {
706 return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
707 }
708
709 /// canInstructionRangeModRef - Return true if it is possible for the
710 /// execution of the specified instructions to mod/ref (according to the
711 /// mode) the location Loc. The instructions to consider are all
712 /// of the instructions in the range of [I1,I2] INCLUSIVE.
713 /// I1 and I2 must be in the same basic block.
714 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
715 const Instruction &I2,
716 const MemoryLocation &Loc,
717 const ModRefInfo Mode) {
718 assert(I1.getParent() == I2.getParent() &&
719 "Instructions not in same basic block!");
720 BasicBlock::const_iterator I = I1.getIterator();
721 BasicBlock::const_iterator E = I2.getIterator();
722 ++E; // Convert from inclusive to exclusive range.
723
724 for (; I != E; ++I) // Check every instruction in range
725 if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
726 return true;
727 return false;
728 }
729
730 // Provide a definition for the root virtual destructor.
731 AAResults::Concept::~Concept() = default;
732
733 // Provide a definition for the static object used to identify passes.
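// (Its address identifies the AAManager analysis in the new pass manager;
// clients typically obtain the aggregated AAResults via
// FunctionAnalysisManager::getResult<AAManager>(F), which runs AAManager::run
// defined further below.)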
734 AnalysisKey AAManager::Key; 735 736 ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) { 737 initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); 738 } 739 740 ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB) 741 : ImmutablePass(ID), CB(std::move(CB)) { 742 initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); 743 } 744 745 char ExternalAAWrapperPass::ID = 0; 746 747 INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis", 748 false, true) 749 750 ImmutablePass * 751 llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) { 752 return new ExternalAAWrapperPass(std::move(Callback)); 753 } 754 755 AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) { 756 initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry()); 757 } 758 759 char AAResultsWrapperPass::ID = 0; 760 761 INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa", 762 "Function Alias Analysis Results", false, true) 763 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 764 INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass) 765 INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass) 766 INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass) 767 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 768 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass) 769 INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass) 770 INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass) 771 INITIALIZE_PASS_END(AAResultsWrapperPass, "aa", 772 "Function Alias Analysis Results", false, true) 773 774 FunctionPass *llvm::createAAResultsWrapperPass() { 775 return new AAResultsWrapperPass(); 776 } 777 778 /// Run the wrapper pass to rebuild an aggregation over known AA passes. 779 /// 780 /// This is the legacy pass manager's interface to the new-style AA results 781 /// aggregation object. Because this is somewhat shoe-horned into the legacy 782 /// pass manager, we hard code all the specific alias analyses available into 783 /// it. While the particular set enabled is configured via commandline flags, 784 /// adding a new alias analysis to LLVM will require adding support for it to 785 /// this list. 786 bool AAResultsWrapperPass::runOnFunction(Function &F) { 787 // NB! This *must* be reset before adding new AA results to the new 788 // AAResults object because in the legacy pass manager, each instance 789 // of these will refer to the *same* immutable analyses, registering and 790 // unregistering themselves with them. We need to carefully tear down the 791 // previous object first, in this case replacing it with an empty one, before 792 // registering new results. 793 AAR.reset( 794 new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F))); 795 796 // BasicAA is always available for function analyses. Also, we add it first 797 // so that it can trump TBAA results when it proves MustAlias. 798 // FIXME: TBAA should have an explicit mode to support this and then we 799 // should reconsider the ordering here. 800 if (!DisableBasicAA) 801 AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult()); 802 803 // Populate the results with the currently available AAs. 
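// Each of the wrappers below is optional: its result is added only when the
// corresponding legacy pass was actually scheduled, which is why they are all
// declared addUsedIfAvailable in getAnalysisUsage further down.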
804 if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>()) 805 AAR->addAAResult(WrapperPass->getResult()); 806 if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>()) 807 AAR->addAAResult(WrapperPass->getResult()); 808 if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>()) 809 AAR->addAAResult(WrapperPass->getResult()); 810 if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>()) 811 AAR->addAAResult(WrapperPass->getResult()); 812 if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>()) 813 AAR->addAAResult(WrapperPass->getResult()); 814 if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>()) 815 AAR->addAAResult(WrapperPass->getResult()); 816 817 // If available, run an external AA providing callback over the results as 818 // well. 819 if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>()) 820 if (WrapperPass->CB) 821 WrapperPass->CB(*this, F, *AAR); 822 823 // Analyses don't mutate the IR, so return false. 824 return false; 825 } 826 827 void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 828 AU.setPreservesAll(); 829 AU.addRequiredTransitive<BasicAAWrapperPass>(); 830 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 831 832 // We also need to mark all the alias analysis passes we will potentially 833 // probe in runOnFunction as used here to ensure the legacy pass manager 834 // preserves them. This hard coding of lists of alias analyses is specific to 835 // the legacy pass manager. 836 AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>(); 837 AU.addUsedIfAvailable<TypeBasedAAWrapperPass>(); 838 AU.addUsedIfAvailable<GlobalsAAWrapperPass>(); 839 AU.addUsedIfAvailable<SCEVAAWrapperPass>(); 840 AU.addUsedIfAvailable<CFLAndersAAWrapperPass>(); 841 AU.addUsedIfAvailable<CFLSteensAAWrapperPass>(); 842 AU.addUsedIfAvailable<ExternalAAWrapperPass>(); 843 } 844 845 AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) { 846 Result R(AM.getResult<TargetLibraryAnalysis>(F)); 847 for (auto &Getter : ResultGetters) 848 (*Getter)(F, AM, R); 849 return R; 850 } 851 852 AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F, 853 BasicAAResult &BAR) { 854 AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)); 855 856 // Add in our explicitly constructed BasicAA results. 857 if (!DisableBasicAA) 858 AAR.addAAResult(BAR); 859 860 // Populate the results with the other currently available AAs. 
861 if (auto *WrapperPass = 862 P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>()) 863 AAR.addAAResult(WrapperPass->getResult()); 864 if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>()) 865 AAR.addAAResult(WrapperPass->getResult()); 866 if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>()) 867 AAR.addAAResult(WrapperPass->getResult()); 868 if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>()) 869 AAR.addAAResult(WrapperPass->getResult()); 870 if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>()) 871 AAR.addAAResult(WrapperPass->getResult()); 872 if (auto *WrapperPass = P.getAnalysisIfAvailable<ExternalAAWrapperPass>()) 873 if (WrapperPass->CB) 874 WrapperPass->CB(P, F, AAR); 875 876 return AAR; 877 } 878 879 bool llvm::isNoAliasCall(const Value *V) { 880 if (const auto *Call = dyn_cast<CallBase>(V)) 881 return Call->hasRetAttr(Attribute::NoAlias); 882 return false; 883 } 884 885 static bool isNoAliasOrByValArgument(const Value *V) { 886 if (const Argument *A = dyn_cast<Argument>(V)) 887 return A->hasNoAliasAttr() || A->hasByValAttr(); 888 return false; 889 } 890 891 bool llvm::isIdentifiedObject(const Value *V) { 892 if (isa<AllocaInst>(V)) 893 return true; 894 if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V)) 895 return true; 896 if (isNoAliasCall(V)) 897 return true; 898 if (isNoAliasOrByValArgument(V)) 899 return true; 900 return false; 901 } 902 903 bool llvm::isIdentifiedFunctionLocal(const Value *V) { 904 return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V); 905 } 906 907 bool llvm::isEscapeSource(const Value *V) { 908 if (auto *CB = dyn_cast<CallBase>(V)) 909 return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB, 910 true); 911 912 // The load case works because isNonEscapingLocalObject considers all 913 // stores to be escapes (it passes true for the StoreCaptures argument 914 // to PointerMayBeCaptured). 915 if (isa<LoadInst>(V)) 916 return true; 917 918 // The inttoptr case works because isNonEscapingLocalObject considers all 919 // means of converting or equating a pointer to an int (ptrtoint, ptr store 920 // which could be followed by an integer load, ptr<->int compare) as 921 // escaping, and objects located at well-known addresses via platform-specific 922 // means cannot be considered non-escaping local objects. 923 if (isa<IntToPtrInst>(V)) 924 return true; 925 926 return false; 927 } 928 929 bool llvm::isNotVisibleOnUnwind(const Value *Object, 930 bool &RequiresNoCaptureBeforeUnwind) { 931 RequiresNoCaptureBeforeUnwind = false; 932 933 // Alloca goes out of scope on unwind. 934 if (isa<AllocaInst>(Object)) 935 return true; 936 937 // Byval goes out of scope on unwind. 938 if (auto *A = dyn_cast<Argument>(Object)) 939 return A->hasByValAttr(); 940 941 // A noalias return is not accessible from any other code. If the pointer 942 // does not escape prior to the unwind, then the caller cannot access the 943 // memory either. 944 if (isNoAliasCall(Object)) { 945 RequiresNoCaptureBeforeUnwind = true; 946 return true; 947 } 948 949 return false; 950 } 951 952 void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) { 953 // This function needs to be in sync with llvm::createLegacyPMAAResults -- if 954 // more alias analyses are added to llvm::createLegacyPMAAResults, they need 955 // to be added here also. 
956 AU.addRequired<TargetLibraryInfoWrapperPass>(); 957 AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>(); 958 AU.addUsedIfAvailable<TypeBasedAAWrapperPass>(); 959 AU.addUsedIfAvailable<GlobalsAAWrapperPass>(); 960 AU.addUsedIfAvailable<CFLAndersAAWrapperPass>(); 961 AU.addUsedIfAvailable<CFLSteensAAWrapperPass>(); 962 AU.addUsedIfAvailable<ExternalAAWrapperPass>(); 963 } 964
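//===----------------------------------------------------------------------===//
// Example: hooking a custom AA into the legacy pipeline
//===----------------------------------------------------------------------===//
//
// A minimal sketch (client code, not compiled here) of how an out-of-tree
// alias analysis can be plugged in through the callback-based wrapper defined
// above. Only createExternalAAWrapperPass and the callback signature come from
// this file; getMyAAResult is a hypothetical placeholder.
//
//   #include "llvm/Analysis/AliasAnalysis.h"
//   #include "llvm/IR/LegacyPassManager.h"
//
//   void addMyAA(llvm::legacy::PassManagerBase &PM) {
//     PM.add(llvm::createExternalAAWrapperPass(
//         [](llvm::Pass &P, llvm::Function &F, llvm::AAResults &AAR) {
//           // AAR.addAAResult(getMyAAResult(F)); // hypothetical; the result
//           // object must outlive AAR, as with the in-tree AAs above.
//         }));
//   }
//
// The callback fires from AAResultsWrapperPass::runOnFunction (and from
// createLegacyPMAAResults) after the standard AAs have been registered, so the
// custom result is consulted after them.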