//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via platform-specific
  // means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object".
  // In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to be
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
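  // (A null entry in EarliestEscapes means FindEarliestCapture found no
  // capture of the object anywhere in the function.)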
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
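/// For example (an illustrative sketch, not code from this file): the
/// expression (sext i32 %x to i64) * 4 + 8 would be represented with
/// Val = %x (SExtBits = 32), Scale = 4 and Offset = 8.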
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensure a pointer offset fits in an integer of size IndexSize (in bits) when
/// that size is smaller than the maximum index size. This is an issue, in
/// particular, for 32b pointers with negative indices that rely on two's
/// complement wrap-arounds for precise alias information where the maximum
/// index size is 64b.
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (None iff expression doesn't involve any geps)
  Optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
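///
/// For example (a hedged sketch using hypothetical IR names, not code from
/// this file): decomposing
///   %p = getelementptr inbounds i32, i32* %base, i64 %i
/// yields Base = %base, Offset = 0 and a single variable index with Val = %i
/// and Scale = 4 (the alloc size of i32), assuming a typical data layout.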
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed
        // with attributes, e.g. returning a pointer that aliases an argument.
        // Because some analyses may assume that a nocapture pointer is never
        // returned from such an intrinsic (the function would otherwise have
        // to mark the argument "returned"), it is crucial to use this helper,
        // which is kept in sync with CaptureTracking. Not using it may cause
        // weird miscompilations where two aliasing pointers are assumed to be
        // noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize =
          DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sextOrSelf(MaxIndexSize);
      APInt Scale = LE.Scale.sextOrSelf(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x]  -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      Scale = adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call.  This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
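///
/// For example (hypothetical IR, only a sketch): if %p is a local alloca that
/// has not been captured before the call, and the callee's only pointer
/// argument is nocapture and can be proven not to alias %p, the argument scan
/// below leaves the result at NoModRef.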
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., the source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object. Note that just being isIdentifiedObject() is
/// not enough - for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
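  // (After the subtraction, DecompGEP1.Offset and DecompGEP1.VarIndices
  // describe the byte difference GEP1 - V2, which is what the heuristics
  // below reason about.)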
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
                                    MemoryLocation(DecompGEP2.Base, V2Size),
                                    AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
      MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |->V1Size        |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by the right pointer is nested. Save the offset in
        // the cache. Note that the offset was originally estimated as GEP1-V2,
        // but AliasResult stores the shift that represents GEP1+Offset=V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                        Scale.countTrailingZeros());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR =
        computeConstantRange(Index.Val.V, true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
    else
      OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  Optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // If V != 0 then abs(VarIndex) >= abs(Scale).
      MinAbsVarIndex = Var.Scale.abs();
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that VisitedPhiBBs is empty, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
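///
/// For example (a rough sketch with hypothetical IR names): for a pointer
/// induction variable
///   %p = phi i8* [ %a, %entry ], [ %p.next, %loop ]
///   %p.next = getelementptr inbounds i8, i8* %p, i64 1
/// the incoming value %p.next is recognised as recursive, so the phi is
/// compared against the other pointer via %a alone, using an unknown
/// (before-or-after pointer) size for the phi side.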
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
1475   if (V1Srcs.empty())
1476     return AliasResult::MayAlias;
1477
1478   // If this PHI node is recursive, indicate that the pointer may be moved
1479   // across iterations. We can only prove NoAlias if different underlying
1480   // objects are involved.
1481   if (isRecursive)
1482     PNSize = LocationSize::beforeOrAfterPointer();
1483
1484   // In the recursive alias queries below, we may compare values from two
1485   // different loop iterations. Keep track of visited phi blocks, which will
1486   // be used when determining value equivalence.
1487   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1488   auto _ = make_scope_exit([&]() {
1489     if (BlockInserted)
1490       VisitedPhiBBs.erase(PN->getParent());
1491   });
1492
1493   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1494   // have been cached earlier may no longer be valid. Perform recursive queries
1495   // with a new AAQueryInfo.
1496   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1497   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1498
1499   AliasResult Alias = getBestAAResults().alias(
1500       MemoryLocation(V2, V2Size),
1501       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1502
1503   // Early exit if the check of the first PHI source against V2 is MayAlias.
1504   // Other results are not possible.
1505   if (Alias == AliasResult::MayAlias)
1506     return AliasResult::MayAlias;
1507   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1508   // remain valid for all elements, so we must conservatively return MayAlias.
1509   if (isRecursive && Alias != AliasResult::NoAlias)
1510     return AliasResult::MayAlias;
1511
1512   // If all sources of the PHI node NoAlias or MustAlias V2, then the result is
1513   // NoAlias / MustAlias. Otherwise, it is MayAlias.
1514   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1515     Value *V = V1Srcs[i];
1516
1517     AliasResult ThisAlias = getBestAAResults().alias(
1518         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1519     Alias = MergeAliasResults(ThisAlias, Alias);
1520     if (Alias == AliasResult::MayAlias)
1521       break;
1522   }
1523
1524   return Alias;
1525 }
1526
1527 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1528 /// array references.
1529 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1530                                       const Value *V2, LocationSize V2Size,
1531                                       AAQueryInfo &AAQI) {
1532   // If either of the memory references is empty, it doesn't matter what the
1533   // pointer values are.
1534   if (V1Size.isZero() || V2Size.isZero())
1535     return AliasResult::NoAlias;
1536
1537   // Strip off any casts if they exist.
1538   V1 = V1->stripPointerCastsForAliasAnalysis();
1539   V2 = V2->stripPointerCastsForAliasAnalysis();
1540
1541   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1542   // value for undef that aliases nothing in the program.
1543   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1544     return AliasResult::NoAlias;
1545
1546   // Are we checking for alias of the same value?
1547   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1548   // different iterations. We must therefore make sure that this is not the
1549   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1550   // happen by looking at the visited phi nodes and making sure they cannot
1551   // reach the value.
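  // Illustrative (hypothetical) example of the concern: if the query has
  // looked through %p = phi ptr [ %a, %entry ], [ %p.next, %loop ], two
  // occurrences of the same SSA value defined inside the loop may stand for
  // addresses from different iterations, so name equality alone would not
  // justify MustAlias here.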
1552   if (isValueEqualInPotentialCycles(V1, V2))
1553     return AliasResult::MustAlias;
1554
1555   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1556     return AliasResult::NoAlias; // Scalars cannot alias each other
1557
1558   // Figure out what objects these things are pointing to if we can.
1559   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1560   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1561
1562   // Null values in the default address space don't point to any object, so they
1563   // don't alias any other pointer.
1564   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1565     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1566       return AliasResult::NoAlias;
1567   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1568     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1569       return AliasResult::NoAlias;
1570
1571   if (O1 != O2) {
1572     // If V1/V2 point to two different objects, we know that we have no alias.
1573     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1574       return AliasResult::NoAlias;
1575
1576     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1577     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1578         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1579       return AliasResult::NoAlias;
1580
1581     // Function arguments can't alias with things that are known to be
1582     // unambiguously identified at the function level.
1583     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1584         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1585       return AliasResult::NoAlias;
1586
1587     // If one pointer is the result of a call/invoke or load and the other is a
1588     // non-escaping local object within the same function, then we know the
1589     // object couldn't escape to a point where the call could return it.
1590     //
1591     // Note that if the pointers are in different functions, there are a
1592     // variety of complications. A call with a nocapture argument may still
1593     // temporarily store the nocapture argument's value in a temporary memory
1594     // location if that memory location doesn't escape. Or it may pass a
1595     // nocapture value to other functions as long as they don't capture it.
1596     if (isEscapeSource(O1) &&
1597         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1598       return AliasResult::NoAlias;
1599     if (isEscapeSource(O2) &&
1600         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1601       return AliasResult::NoAlias;
1602   }
1603
1604   // If the size of one access is larger than the entire object on the other
1605   // side, then we know such behavior is undefined and can assume no alias.
1606   bool NullIsValidLocation = NullPointerIsDefined(&F);
1607   if ((isObjectSmallerThan(
1608           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1609           TLI, NullIsValidLocation)) ||
1610       (isObjectSmallerThan(
1611           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1612           TLI, NullIsValidLocation)))
1613     return AliasResult::NoAlias;
1614
1615   // If one of the accesses may be before the accessed pointer, canonicalize this
1616   // by using unknown after-pointer sizes for both accesses. This is
1617   // equivalent, because regardless of which pointer is lower, one of them
1618   // will always come after the other, as long as the underlying objects aren't
1619   // disjoint.
We do this so that the rest of BasicAA does not have to deal 1620 // with accesses before the base pointer, and to improve cache utilization by 1621 // merging equivalent states. 1622 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) { 1623 V1Size = LocationSize::afterPointer(); 1624 V2Size = LocationSize::afterPointer(); 1625 } 1626 1627 // FIXME: If this depth limit is hit, then we may cache sub-optimal results 1628 // for recursive queries. For this reason, this limit is chosen to be large 1629 // enough to be very rarely hit, while still being small enough to avoid 1630 // stack overflows. 1631 if (AAQI.Depth >= 512) 1632 return AliasResult::MayAlias; 1633 1634 // Check the cache before climbing up use-def chains. This also terminates 1635 // otherwise infinitely recursive queries. 1636 AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size}); 1637 const bool Swapped = V1 > V2; 1638 if (Swapped) 1639 std::swap(Locs.first, Locs.second); 1640 const auto &Pair = AAQI.AliasCache.try_emplace( 1641 Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0}); 1642 if (!Pair.second) { 1643 auto &Entry = Pair.first->second; 1644 if (!Entry.isDefinitive()) { 1645 // Remember that we used an assumption. 1646 ++Entry.NumAssumptionUses; 1647 ++AAQI.NumAssumptionUses; 1648 } 1649 // Cache contains sorted {V1,V2} pairs but we should return original order. 1650 auto Result = Entry.Result; 1651 Result.swap(Swapped); 1652 return Result; 1653 } 1654 1655 int OrigNumAssumptionUses = AAQI.NumAssumptionUses; 1656 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size(); 1657 AliasResult Result = 1658 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2); 1659 1660 auto It = AAQI.AliasCache.find(Locs); 1661 assert(It != AAQI.AliasCache.end() && "Must be in cache"); 1662 auto &Entry = It->second; 1663 1664 // Check whether a NoAlias assumption has been used, but disproven. 1665 bool AssumptionDisproven = 1666 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias; 1667 if (AssumptionDisproven) 1668 Result = AliasResult::MayAlias; 1669 1670 // This is a definitive result now, when considered as a root query. 1671 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses; 1672 Entry.Result = Result; 1673 // Cache contains sorted {V1,V2} pairs. 1674 Entry.Result.swap(Swapped); 1675 Entry.NumAssumptionUses = -1; 1676 1677 // If the assumption has been disproven, remove any results that may have 1678 // been based on this assumption. Do this after the Entry updates above to 1679 // avoid iterator invalidation. 1680 if (AssumptionDisproven) 1681 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults) 1682 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val()); 1683 1684 // The result may still be based on assumptions higher up in the chain. 1685 // Remember it, so it can be purged from the cache later. 
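  // Illustrative (assumed) scenario: a phi-based query seeds a provisional
  // NoAlias entry for {V1, V2}, then a recursive query on the incoming values
  // reads that provisional entry, bumping NumAssumptionUses. If the recursion
  // later disproves NoAlias, the entry was downgraded to MayAlias above and
  // the dependent entries recorded in AssumptionBasedResults were erased;
  // otherwise the surviving result is remembered below so that a disproof
  // further up the chain can still purge it.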
1686   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1687       Result != AliasResult::MayAlias)
1688     AAQI.AssumptionBasedResults.push_back(Locs);
1689   return Result;
1690 }
1691
1692 AliasResult BasicAAResult::aliasCheckRecursive(
1693     const Value *V1, LocationSize V1Size,
1694     const Value *V2, LocationSize V2Size,
1695     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1696   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1697     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1698     if (Result != AliasResult::MayAlias)
1699       return Result;
1700   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1701     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1702     if (Result != AliasResult::MayAlias)
1703       return Result;
1704   }
1705
1706   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1707     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1708     if (Result != AliasResult::MayAlias)
1709       return Result;
1710   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1711     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1712     if (Result != AliasResult::MayAlias)
1713       return Result;
1714   }
1715
1716   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1717     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1718     if (Result != AliasResult::MayAlias)
1719       return Result;
1720   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1721     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1722     if (Result != AliasResult::MayAlias)
1723       return Result;
1724   }
1725
1726   // If both pointers are pointing into the same object and one of them
1727   // accesses the entire object, then the accesses must overlap in some way.
1728   if (O1 == O2) {
1729     bool NullIsValidLocation = NullPointerIsDefined(&F);
1730     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1731         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1732          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1733       return AliasResult::PartialAlias;
1734   }
1735
1736   return AliasResult::MayAlias;
1737 }
1738
1739 /// Check whether two Values can be considered equivalent.
1740 ///
1741 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1742 /// they cannot be part of a cycle in the value graph by looking at all
1743 /// visited phi nodes and making sure that the phis cannot reach the value. We
1744 /// have to do this because we are looking through phi nodes (that is, we say
1745 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1746 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1747                                                   const Value *V2) {
1748   if (V != V2)
1749     return false;
1750
1751   const Instruction *Inst = dyn_cast<Instruction>(V);
1752   if (!Inst)
1753     return true;
1754
1755   if (VisitedPhiBBs.empty())
1756     return true;
1757
1758   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1759     return false;
1760
1761   // Make sure that the visited phis cannot reach the Value. This ensures that
1762   // the Values cannot come from different iterations of a potential cycle the
1763   // phi nodes could be involved in.
1764   for (auto *P : VisitedPhiBBs)
1765     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1766       return false;
1767
1768   return true;
1769 }
1770
1771 /// Computes the symbolic difference between two de-composed GEPs.
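/// Illustrative example with made-up values (schematic, not the exact field
/// syntax): if DestGEP is { Offset = 16, VarIndices = [ (%i, Scale 4) ] } and
/// SrcGEP is { Offset = 4, VarIndices = [ (%i, Scale 4) ] }, the result is
/// { Offset = 12, VarIndices = [] }: the matching %i entries cancel and only
/// the constant difference survives. Had the scales differed, the Dest entry
/// would instead be kept with Scale reduced by Src's scale and IsNSW cleared.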
1772 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP, 1773 const DecomposedGEP &SrcGEP) { 1774 DestGEP.Offset -= SrcGEP.Offset; 1775 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) { 1776 // Find V in Dest. This is N^2, but pointer indices almost never have more 1777 // than a few variable indexes. 1778 bool Found = false; 1779 for (auto I : enumerate(DestGEP.VarIndices)) { 1780 VariableGEPIndex &Dest = I.value(); 1781 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) || 1782 !Dest.Val.hasSameCastsAs(Src.Val)) 1783 continue; 1784 1785 // If we found it, subtract off Scale V's from the entry in Dest. If it 1786 // goes to zero, remove the entry. 1787 if (Dest.Scale != Src.Scale) { 1788 Dest.Scale -= Src.Scale; 1789 Dest.IsNSW = false; 1790 } else { 1791 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index()); 1792 } 1793 Found = true; 1794 break; 1795 } 1796 1797 // If we didn't consume this entry, add it to the end of the Dest list. 1798 if (!Found) { 1799 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW}; 1800 DestGEP.VarIndices.push_back(Entry); 1801 } 1802 } 1803 } 1804 1805 bool BasicAAResult::constantOffsetHeuristic( 1806 const DecomposedGEP &GEP, LocationSize MaybeV1Size, 1807 LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) { 1808 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() || 1809 !MaybeV2Size.hasValue()) 1810 return false; 1811 1812 const uint64_t V1Size = MaybeV1Size.getValue(); 1813 const uint64_t V2Size = MaybeV2Size.getValue(); 1814 1815 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1]; 1816 1817 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) || 1818 Var0.Scale != -Var1.Scale || 1819 Var0.Val.V->getType() != Var1.Val.V->getType()) 1820 return false; 1821 1822 // We'll strip off the Extensions of Var0 and Var1 and do another round 1823 // of GetLinearExpression decomposition. In the example above, if Var0 1824 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1. 1825 1826 LinearExpression E0 = 1827 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT); 1828 LinearExpression E1 = 1829 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT); 1830 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) || 1831 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V)) 1832 return false; 1833 1834 // We have a hit - Var0 and Var1 only differ by a constant offset! 1835 1836 // If we've been sext'ed then zext'd the maximum difference between Var0 and 1837 // Var1 is possible to calculate, but we're just interested in the absolute 1838 // minimum difference between the two. The minimum distance may occur due to 1839 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so 1840 // the minimum distance between %i and %i + 5 is 3. 1841 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff; 1842 MinDiff = APIntOps::umin(MinDiff, Wrapped); 1843 APInt MinDiffBytes = 1844 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs(); 1845 1846 // We can't definitely say whether GEP1 is before or after V2 due to wrapping 1847 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other 1848 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and 1849 // V2Size can fit in the MinDiffBytes gap. 
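  // For example (illustrative numbers): with the i3 index above, MinDiff is 3;
  // if Var0.Scale is 4 then MinDiffBytes is 12, so two 4-byte accesses with a
  // zero constant GEP.Offset are provably disjoint, whereas 16-byte accesses
  // would not be.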
1850 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) && 1851 MinDiffBytes.uge(V2Size + GEP.Offset.abs()); 1852 } 1853 1854 //===----------------------------------------------------------------------===// 1855 // BasicAliasAnalysis Pass 1856 //===----------------------------------------------------------------------===// 1857 1858 AnalysisKey BasicAA::Key; 1859 1860 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { 1861 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1862 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1863 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 1864 auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F); 1865 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV); 1866 } 1867 1868 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) { 1869 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry()); 1870 } 1871 1872 char BasicAAWrapperPass::ID = 0; 1873 1874 void BasicAAWrapperPass::anchor() {} 1875 1876 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", 1877 "Basic Alias Analysis (stateless AA impl)", true, true) 1878 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1879 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1880 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1881 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass) 1882 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa", 1883 "Basic Alias Analysis (stateless AA impl)", true, true) 1884 1885 FunctionPass *llvm::createBasicAAWrapperPass() { 1886 return new BasicAAWrapperPass(); 1887 } 1888 1889 bool BasicAAWrapperPass::runOnFunction(Function &F) { 1890 auto &ACT = getAnalysis<AssumptionCacheTracker>(); 1891 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); 1892 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); 1893 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>(); 1894 1895 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, 1896 TLIWP.getTLI(F), ACT.getAssumptionCache(F), 1897 &DTWP.getDomTree(), 1898 PVWP ? &PVWP->getResult() : nullptr)); 1899 1900 return false; 1901 } 1902 1903 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 1904 AU.setPreservesAll(); 1905 AU.addRequiredTransitive<AssumptionCacheTracker>(); 1906 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 1907 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 1908 AU.addUsedIfAvailable<PhiValuesWrapperPass>(); 1909 } 1910 1911 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) { 1912 return BasicAAResult( 1913 F.getParent()->getDataLayout(), F, 1914 P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 1915 P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 1916 } 1917
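// Illustrative usage sketch (assumed client code, not part of this file):
// under the new pass manager a transform normally queries the aggregated
// AAManager result rather than BasicAA directly, e.g.
//   AAResults &AA = AM.getResult<AAManager>(F);
//   if (AA.isNoAlias(MemoryLocation(P, LocationSize::precise(4)),
//                    MemoryLocation(Q, LocationSize::precise(4))))
//     ...; // the two 4-byte accesses cannot overlap
// BasicAA contributes to that combined result once it is registered in the
// AAManager, as the default pipelines do.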