//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// According to the LangRef, branching on a poison condition is absolutely
// immediate full UB. However, historically we haven't implemented that
// consistently as we have an important transformation (non-trivial unswitch)
// which introduces instances of branch on poison/undef to otherwise well
// defined programs. This flag exists to let us test the optimization benefit
// of exploiting the specified behavior (in combination with enabling the
// unswitch fix).
static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
                                        cl::Hidden, cl::init(false));


/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
  if (DemandedElts.isZero())
    return true;
  // Simple case of a shuffle with zeroinitializer.
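  // For example, the splat
  //   shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> zeroinitializer
  // reads only element 0 of %x, no matter which result elements are demanded.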
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
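  // For example, with
  //   %lhs = and i32 %x, %notm   ; where %notm = xor i32 %m, -1
  //   %rhs = and i32 %y, %m
  // every bit position is masked to zero on at least one side, so the two
  // values can never have a set bit in common.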
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
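  // A value is strictly positive exactly when it is both non-negative (sign
  // bit clear) and non-zero, which is what the two queries below establish.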
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMinSignedBits(const Value *V, const DataLayout &DL,
                                    unsigned Depth, AssumptionCache *AC,
                                    const Instruction *CxtI,
                                    const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0,
                                   const Value *Op1, bool NSW,
                                   const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
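  // For example, adding a completely unknown value to anything can produce
  // any bit pattern, so there is nothing to learn from the other operand.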
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  Known = KnownBits::mul(Known, Known2);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
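  // For example, in
  //   %c = icmp ugt i32 %x, 10
  //   call void @llvm.assume(i1 %c)
  // %c is ephemeral to the assume: it exists only to feed the assumption,
  // so the assume must not be used to simplify %c itself.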
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is
    // first in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
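  // For example, v u>= 16 has the true-region [16, 0), which excludes zero,
  // while v s> -5 has the true-region [-4, SIGNED_MIN), which contains it.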
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
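        // For example (i8), assume(~(v | 0x0F) == 0xC0) forces v | 0x0F to
        // equal 0x3F, so bits 7..4 of v are 0011 while the low nibble stays
        // unknown.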
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
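        // For example (i8), assume((v >> 4) == 0x0A) pins bits 7..4 of v to
        // 1010; the low four bits remain unknown.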
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
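        // For example (i8), v u< 32 proves the top three bits of v are zero
        // (v <= 31), whereas v u< 33 only proves the top two (v <= 63).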
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits and a shift amount,
/// computes the implied known bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
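    // For example, in
    //   %m = add i32 %x, -1
    //   %r = and i32 %x, %m
    // bit 0 of %r is always clear: adding the odd value -1 flips bit 0 of %x,
    // so %x and %m always disagree in that position.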
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
        Q.DL.getPointerTypeSizeInBits(ScalarTy) :
        Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy())
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
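      // For example, a nsw shl of a known-negative value is either poison or
      // still negative, so the sign bit can be copied from the operand.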
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countTrailingZeros());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countLeadingZeros());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.
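        // For example, for %p = getelementptr {i32, i64}, ptr %b, i64 0, i32 1
        // the i32 1 operand selects field 1, whose offset comes from the
        // struct layout (8 bytes under a typical 64-bit data layout) rather
        // than from an index * sizeof(element) scaling.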

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }

      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits.
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If the initial value of the recurrence is nonnegative, and we are
          // adding a nonnegative number with nsw, the result can only be
          // nonnegative or poison regardless of the number of times we
          // execute the add in the phi recurrence. If the initial value is
          // negative and we are adding a negative number with nsw, the result
          // can only be negative or poison. Similar arguments apply to sub
          // and mul.
          if (Opcode == Instruction::Add) {
            if (Known2.isNonNegative() && Known3.isNonNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNegative())
              Known.makeNegative();
          }

          // (sub nsw non-negative, negative) --> non-negative
          // (sub nsw negative, non-negative) --> negative
          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
            if (Known2.isNonNegative() && Known3.isNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNonNegative())
              Known.makeNegative();
          }

          // (mul nsw non-negative, non-negative) --> non-negative
          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                   Known3.isNonNegative())
            Known.makeNonNegative();
        }

        break;
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the intersection of the known bit sets of the incoming
    // values, taking conservative care to avoid excessive recursion.
    if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to itself.
      if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
        Value *IncValue = P->getIncomingValue(u);
        // Skip direct self references.
        if (IncValue == P) continue;

        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;
        RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
        Known = KnownBits::commonBits(Known, Known2);
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (Known.isUnknown())
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
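    // E.g. (an illustrative sketch, names hypothetical):
    //   %x = call i8 @f(), !range !0
    //   !0 = !{i8 1, i8 17}
    // constrains %x to [1, 17), so the top three bits are known zero and
    // zero itself is excluded from the range.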
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::abs: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
        Known = Known2.abs(IntMinIsPoison);
        break;
      }
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.countMaxLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ) + 1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.countMaxTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ) + 1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet) + 1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
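        // Normalization sketch (hypothetical values): for i8 operands, an
        // fshr by 3 extracts the same bits of the concatenated value as an
        // fshl by 8 - 3 = 5, so only the fshl form needs to be handled
        // below.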
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        KnownBits Known3(BitWidth);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::uadd_sat:
      case Intrinsic::usub_sat: {
        bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

        // Add: Leading ones of either operand are preserved.
        // Sub: Leading zeros of LHS and leading ones of RHS are preserved
        // as leading zeros in the result.
        unsigned LeadingKnown;
        if (IsAdd)
          LeadingKnown = std::max(Known.countMinLeadingOnes(),
                                  Known2.countMinLeadingOnes());
        else
          LeadingKnown = std::max(Known.countMinLeadingZeros(),
                                  Known2.countMinLeadingOnes());

        Known = KnownBits::computeForAddSub(
            IsAdd, /* NSW */ false, Known, Known2);

        // We select between the operation result and all-ones/zero
        // respectively, so we can preserve known ones/zeros.
        if (IsAdd) {
          Known.One.setHighBits(LeadingKnown);
          Known.Zero.clearAllBits();
        } else {
          Known.Zero.setHighBits(LeadingKnown);
          Known.One.clearAllBits();
        }
        break;
      }
      case Intrinsic::umin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umin(Known, Known2);
        break;
      case Intrinsic::umax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umax(Known, Known2);
        break;
      case Intrinsic::smin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smin(Known, Known2);
        break;
      case Intrinsic::smax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smax(Known, Known2);
        break;
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      case Intrinsic::riscv_vsetvli:
      case Intrinsic::riscv_vsetvlimax:
        // Assume that VL output is positive and would fit in an int32_t.
        // TODO: VLEN might be capped at 16 bits in a future V spec update.
        if (BitWidth >= 32)
          Known.Zero.setBitsFrom(31);
        break;
      case Intrinsic::vscale: {
        if (!II->getParent() || !II->getFunction() ||
            !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
          break;

        auto VScaleRange = II->getFunction()
                               ->getFnAttribute(Attribute::VScaleRange)
                               .getVScaleRangeArgs();

        if (VScaleRange.second == 0)
          break;

        // If vscale min = max then we know the exact value at compile time
        // and hence we know the exact bits.
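        // E.g. (illustrative): under vscale_range(2,2) the intrinsic always
        // returns 2, so Known encodes the constant 2: One = 0b10 and Zero
        // is its complement.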
        if (VScaleRange.first == VScaleRange.second) {
          Known.One = VScaleRange.first;
          Known.Zero = VScaleRange.first;
          Known.Zero.flipAllBits();
          break;
        }

        unsigned FirstZeroHighBit = 32 - countLeadingZeros(VScaleRange.second);
        if (FirstZeroHighBit < BitWidth)
          Known.Zero.setBitsFrom(FirstZeroHighBit);

        break;
      }
      }
    }
    break;
  case Instruction::ShuffleVector: {
    auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
    // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
    if (!Shuf) {
      Known.resetAll();
      return;
    }
    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    APInt DemandedLHS, DemandedRHS;
    if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedLHS) {
      const Value *LHS = Shuf->getOperand(0);
      computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    if (!!DemandedRHS) {
      const Value *RHS = Shuf->getOperand(1);
      computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case Instruction::InsertElement: {
    const Value *Vec = I->getOperand(0);
    const Value *Elt = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
    // Early out if the index is non-constant or out-of-range.
    unsigned NumElts = DemandedElts.getBitWidth();
    if (!CIdx || CIdx->getValue().uge(NumElts)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    unsigned EltIdx = CIdx->getZExtValue();
    // Do we demand the inserted element?
    if (DemandedElts[EltIdx]) {
      computeKnownBits(Elt, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    // We don't need the base vector element that has been inserted.
    APInt DemandedVecElts = DemandedElts;
    DemandedVecElts.clearBit(EltIdx);
    if (!!DemandedVecElts) {
      computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case Instruction::ExtractElement: {
    // Look through extract element. If the index is non-constant or
    // out-of-range demand all elements, otherwise just the extracted element.
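    // E.g. (an illustrative sketch): for
    //   %e = extractelement <4 x i8> %v, i32 2
    // only element 2 of %v needs to be analyzed, while a variable or
    // out-of-range index falls back to demanding all four elements.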
    const Value *Vec = I->getOperand(0);
    const Value *Idx = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(Idx);
    if (isa<ScalableVectorType>(Vec->getType())) {
      // FIXME: there's probably *something* we can do with scalable vectors
      Known.resetAll();
      break;
    }
    unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
    APInt DemandedVecElts = APInt::getAllOnes(NumElts);
    if (CIdx && CIdx->getValue().ult(NumElts))
      DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
    computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
    break;
  }
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              DemandedElts, Known, Known2, Depth, Q);
          break;
        }
      }
    }
    break;
  case Instruction::Freeze:
    if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
                                  Depth + 1))
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the demanded elements in the
/// vector specified by DemandedElts.
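///
/// Usage sketch (hypothetical caller, not from this file): for a <4 x i8>
/// value V, something like
///   KnownBits Known(8);
///   computeKnownBits(V, APInt::getAllOnes(4), Known, /*Depth=*/0, Q);
/// intersects what is known about each of the four demanded lanes into one
/// 8-bit KnownBits.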
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, unsigned Depth, const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
    // No demanded elts or V is a scalable vector, better to assume we don't
    // know anything.
    Known.resetAll();
    return;
  }

#ifndef NDEBUG
  Type *Ty = V->getType();
  unsigned BitWidth = Known.getBitWidth();

  assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    assert(
        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
        "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars");
  }

  Type *ScalarTy = Ty->getScalarType();
  if (ScalarTy->isPointerTy()) {
    assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  } else {
    assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  }
#endif

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector
    // constant!
    Known = KnownBits::makeConstant(*C);
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
    // We know that CDV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      APInt Elt = CDV->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // All recursive calls that increase depth must come after this.
  if (Depth == MaxAnalysisRecursionDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine Known.Zero set.
  if (isa<PointerType>(V->getType())) {
    Align Alignment = V->getPointerAlignment(Q.DL);
    Known.Zero.setLowBits(Log2(Alignment));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run it after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  // Attempt to match against constants.
  if (OrZero && match(V, m_Power2OrZero()))
    return true;
  if (match(V, m_Power2()))
    return true;

  // 1 << X is clearly a power of two if the one is not shifted off the end. If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  // Peek through min/max.
  if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
    return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
  }

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
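    // Worked example (illustrative): for i8 X = 0b01101000,
    // -X = 0b10011000, so X & -X = 0b00001000 -- exactly the lowest set
    // bit of X; for X = 0 the result is 0.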
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
        Q.IIQ.hasNoSignedWrap(VOBO)) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}

/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  const Function *F = nullptr;
  if (const Instruction *I = dyn_cast<Instruction>(GEP))
    F = I->getFunction();

  if (!GEP->isInBounds() ||
      NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
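    // E.g. (an illustrative sketch): in
    //   %f = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i64 0, i32 1
    // field 1 sits at byte offset 4, so the positive element offset alone
    // proves the result non-null here.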
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxAnalysisRecursionDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  if (isa<Constant>(V))
    return false;

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
    if (const auto *CB = dyn_cast<CallBase>(U))
      if (auto *CalledFunc = CB->getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CB->getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
              DT->dominates(CB, CtxI))
            return true;

    // If the value is used as a load/store, then the pointer must be non-null.
    if (V == getLoadStorePointerOperand(U)) {
      const Instruction *I = cast<Instruction>(U);
      if (!NullPointerIsDefined(I->getFunction(),
                                V->getType()->getPointerAddressSpace()) &&
          DT->dominates(I, CtxI))
        return true;
    }

    // Consider only compare instructions uniquely controlling a branch.
    Value *RHS;
    CmpInst::Predicate Pred;
    if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
      continue;

    bool NonNullIfTrue;
    if (cmpExcludesZero(Pred, RHS))
      NonNullIfTrue = true;
    else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
      NonNullIfTrue = false;
    else
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate "pred != null" condition through AND because it is only
        // correct to assume that all conditions of AND are met in the true
        // branch.
        // TODO: Support similar logic of OR and EQ predicate?
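        // Sketch of the propagated pattern (names hypothetical):
        //   %nonnull = icmp ne i8* %p, null
        //   %cond = and i1 %nonnull, %other
        //   br i1 %cond, label %taken, label %skip
        // In %taken both operands of the and held, so %p is known non-null
        // there.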
        if (NonNullIfTrue)
          if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
            for (auto *CurrU : Curr->users())
              if (Visited.insert(CurrU).second)
                WorkList.push_back(CurrU);
            continue;
          }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(NonNullIfTrue ? 0 : 1);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (NonNullIfTrue && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never equal to Value? The APInt
/// Value must have the same bit width as the values described by the range.
static bool rangeMetadataExcludesValue(const MDNode *Ranges,
                                       const APInt &Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Try to detect a recurrence that monotonically increases/decreases from a
/// non-zero starting value. These are common as induction variables.
static bool isNonZeroRecurrence(const PHINode *PN) {
  BinaryOperator *BO = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  const APInt *StartC, *StepC;
  if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
      !match(Start, m_APInt(StartC)) || StartC->isZero())
    return false;

  switch (BO->getOpcode()) {
  case Instruction::Add:
    // Starting from non-zero and stepping away from zero can never wrap back
    // to zero.
    return BO->hasNoUnsignedWrap() ||
           (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
            StartC->isNegative() == StepC->isNegative());
  case Instruction::Mul:
    return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
           match(Step, m_APInt(StepC)) && !StepC->isZero();
  case Instruction::Shl:
    return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
  case Instruction::AShr:
  case Instruction::LShr:
    return BO->isExact();
  default:
    return false;
  }
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every demanded element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
                    const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return false;

  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      // See the comment for IntToPtr/PtrToInt instructions below.
      if (CE->getOpcode() == Instruction::IntToPtr ||
          CE->getOpcode() == Instruction::PtrToInt)
        if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
                .getFixedSize() <=
            Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
          return isKnownNonZero(CE->getOperand(0), Depth, Q);
    }

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        if (!DemandedElts[i])
          continue;
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non-null unless extern weak
    // or an absolute symbol reference. Other address spaces may have null as a
    // valid address for a global, so we can't assume anything.
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    } else
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  if (isKnownNonZeroFromAssume(V, Q))
    return true;

  // Some of the tests below are recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxAnalysisRecursionDepth)
    return false;

  // Check for pointer simplifications.

  if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval or inalloca argument is never null, unless null is a valid
    // address in its address space. A nonnull argument is assumed to never
    // be 0.
    if (const Argument *A = dyn_cast<Argument>(V)) {
      if (((A->hasPassPointeeByValueCopyAttr() &&
            !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
           A->hasNonNullAttr()))
        return true;
    }

    // A Load tagged with nonnull metadata is never null.
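    // E.g. (illustrative):
    //   %p = load i8*, i8** %pp, !nonnull !0
    // lets %p be treated as non-null without any further analysis.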
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (Call->isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
        return isKnownNonZero(RP, Depth, Q);
    }
  }

  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
    return true;

  // Check for recursive pointer simplifications.
  if (V->getType()->isPointerTy()) {
    // Look through bitcast operations, GEPs, and int2ptr instructions as they
    // do not alter the value, or at least not the nullness property of the
    // value, e.g., int2ptr is allowed to zero/sign extend the value.
    //
    // Note that we have to take special care to avoid looking through
    // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
    // as casts that can alter the value, e.g., AddrSpaceCasts.
    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      return isGEPKnownNonNull(GEP, Depth, Q);

    if (auto *BCO = dyn_cast<BitCastOperator>(V))
      return isKnownNonZero(BCO->getOperand(0), Depth, Q);

    if (auto *I2P = dyn_cast<IntToPtrInst>(V))
      if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
          Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
        return isKnownNonZero(I2P->getOperand(0), Depth, Q);
  }

  // Similar to int2ptr above, we can look through ptr2int here if the cast
  // is a no-op or an extend and not a truncate.
  if (auto *P2I = dyn_cast<PtrToIntInst>(V))
    if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
        Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
      return isKnownNonZero(P2I->getOperand(0), Depth, Q);

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, DemandedElts, Depth, Q) ||
           isKnownNonZero(Y, DemandedElts, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, DemandedElts, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
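    // Worked example (hypothetical bits): for "lshr i8 %x, 2" where the low
    // two bits of %x are known zero and %x is known non-zero, only
    // known-zero bits are discarded, so some set bit must survive the shift.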
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, DemandedElts, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, DemandedElts, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
          isKnownNonZero(Y, DemandedElts, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
        isKnownNonZero(X, DemandedElts, Depth, Q) &&
        isKnownNonZero(Y, DemandedElts, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
      return true;

    // Check if all incoming values are non-zero using recursion.
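    // Sketch (illustrative IR): for
    //   %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ], [ %p, %loop ]
    // the self-reference is skipped, and %a and %b are each checked at the
    // terminator of their incoming block.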
    Query RecQ = Q;
    unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
    return llvm::all_of(PN->operands(), [&](const Use &U) {
      if (U.get() == PN)
        return true;
      RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
      return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
    });
  }
  // ExtractElement
  else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
    const Value *Vec = EEI->getVectorOperand();
    const Value *Idx = EEI->getIndexOperand();
    auto *CIdx = dyn_cast<ConstantInt>(Idx);
    if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
      unsigned NumElts = VecTy->getNumElements();
      APInt DemandedVecElts = APInt::getAllOnes(NumElts);
      if (CIdx && CIdx->getValue().ult(NumElts))
        DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
      return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
    }
  }
  // Freeze
  else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
    auto *Op = FI->getOperand(0);
    if (isKnownNonZero(Op, Depth, Q) &&
        isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
  return Known.One != 0;
}

bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return false;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return isKnownNonZero(V, DemandedElts, Depth, Q);
}

/// If the pair of operators are the same invertible function, return the
/// operands of the function corresponding to each input. Otherwise, return
/// None. An invertible function is one that is 1-to-1 and maps every input
/// value to exactly one output value. This is equivalent to saying that Op1
/// and Op2 are equal exactly when the specified pair of operands are equal
/// (except that Op1 and Op2 may be poison more often).
static Optional<std::pair<Value*, Value*>>
getInvertibleOperands(const Operator *Op1,
                      const Operator *Op2) {
  if (Op1->getOpcode() != Op2->getOpcode())
    return None;

  auto getOperands = [&](unsigned OpNum) -> auto {
    return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
  };

  switch (Op1->getOpcode()) {
  default:
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Op1->getOperand(0) == Op2->getOperand(0))
      return getOperands(1);
    if (Op1->getOperand(1) == Op2->getOperand(1))
      return getOperands(0);
    break;
  case Instruction::Mul: {
    // invertible if A * B == (A * B) mod 2^N where A and B are integers
    // and N is the bitwidth. The nsw case is non-obvious, but proven by
    // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
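    // Counterexample motivating the wrap checks (sketch): in i4,
    // 3 * 6 == 11 * 6 == 2 (mod 16), so a wrapping mul by the same constant
    // is not invertible in its first operand.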
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
    auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
    if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
        (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
      break;

    // Assume operand order has been canonicalized.
    if (Op1->getOperand(1) == Op2->getOperand(1) &&
        isa<ConstantInt>(Op1->getOperand(1)) &&
        !cast<ConstantInt>(Op1->getOperand(1))->isZero())
      return getOperands(0);
    break;
  }
  case Instruction::Shl: {
    // Same as multiplies, with the difference that we don't need to check
    // for a non-zero multiply. Shifts always multiply by non-zero.
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
    auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
    if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
        (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
      break;

    if (Op1->getOperand(1) == Op2->getOperand(1))
      return getOperands(0);
    break;
  }
  case Instruction::AShr:
  case Instruction::LShr: {
    auto *PEO1 = cast<PossiblyExactOperator>(Op1);
    auto *PEO2 = cast<PossiblyExactOperator>(Op2);
    if (!PEO1->isExact() || !PEO2->isExact())
      break;

    if (Op1->getOperand(1) == Op2->getOperand(1))
      return getOperands(0);
    break;
  }
  case Instruction::SExt:
  case Instruction::ZExt:
    if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
      return getOperands(0);
    break;
  case Instruction::PHI: {
    const PHINode *PN1 = cast<PHINode>(Op1);
    const PHINode *PN2 = cast<PHINode>(Op2);

    // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
    // are a single invertible function of the start values? Note that repeated
    // application of an invertible function is also invertible.
    BinaryOperator *BO1 = nullptr;
    Value *Start1 = nullptr, *Step1 = nullptr;
    BinaryOperator *BO2 = nullptr;
    Value *Start2 = nullptr, *Step2 = nullptr;
    if (PN1->getParent() != PN2->getParent() ||
        !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
        !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
      break;

    auto Values = getInvertibleOperands(cast<Operator>(BO1),
                                        cast<Operator>(BO2));
    if (!Values)
      break;

    // We have to be careful of mutually defined recurrences here. Ex:
    // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
    // * X_i = Y_i = X_(i-1) OP Y_(i-1)
    // The invertibility of these is complicated, and not worth reasoning
    // about (yet?).
    if (Values->first != PN1 || Values->second != PN2)
      break;

    return std::make_pair(Start1, Start2);
  }
  }
  return None;
}

/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
                           const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;
  return isKnownNonZero(Op, Depth + 1, Q);
}

/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
/// the multiplication is nuw or nsw.
static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
                          const Query &Q) {
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
    const APInt *C;
    return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
           (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
           !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
  }
  return false;
}

/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
/// the shift is nuw or nsw.
static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
                          const Query &Q) {
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
    const APInt *C;
    return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
           (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
           !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
  }
  return false;
}

static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
                           unsigned Depth, const Query &Q) {
  // Check that the two PHIs are in the same block.
  if (PN1->getParent() != PN2->getParent())
    return false;

  SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
  bool UsedFullRecursion = false;
  for (const BasicBlock *IncomBB : PN1->blocks()) {
    if (!VisitedBBs.insert(IncomBB).second)
      continue; // Don't reprocess blocks that we have dealt with already.
    const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
    const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
    const APInt *C1, *C2;
    if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
      continue;

    // Only one pair of phi operands is allowed for full recursion.
    if (UsedFullRecursion)
      return false;

    Query RecQ = Q;
    RecQ.CxtI = IncomBB->getTerminator();
    if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
      return false;
    UsedFullRecursion = true;
  }
  return true;
}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;

  if (Depth >= MaxAnalysisRecursionDepth)
    return false;

  // See if we can recurse through (exactly one of) our operands. This
  // requires our operation to be 1-to-1 and map every input value to exactly
  // one output value. Such an operation is invertible.
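  // E.g. (an illustrative sketch): if V1 = X + C and V2 = Y + C for the
  // same C, then V1 != V2 exactly when X != Y, so the analysis recurses on
  // the (X, Y) pair returned by getInvertibleOperands.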
  auto *O1 = dyn_cast<Operator>(V1);
  auto *O2 = dyn_cast<Operator>(V2);
  if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
    if (auto Values = getInvertibleOperands(O1, O2))
      return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);

    if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
      const PHINode *PN2 = cast<PHINode>(V2);
      // FIXME: This is missing a generalization to handle the case where one
      // is a PHI and another one isn't.
      if (isNonEqualPHIs(PN1, PN2, Depth, Q))
        return true;
    }
  }

  if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
    return true;

  if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
    return true;

  if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, Depth, Q);
    KnownBits Known2 = computeKnownBits(V2, Depth, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}

// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {
  assert(isa<Operator>(Select) &&
         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
         "Input should be a Select!");

  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
    return false;

  if (!match(RHS, m_APInt(CLow)))
    return false;

  const Value *LHS2 = nullptr, *RHS2 = nullptr;
  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
  if (getInverseMinMaxFlavor(SPF) != SPF2)
    return false;

  if (!match(RHS2, m_APInt(CHigh)))
    return false;

  if (SPF == SPF_SMIN)
    std::swap(CLow, CHigh);

  In = LHS2;
  return CLow->sle(*CHigh);
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 const APInt &DemandedElts,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !isa<FixedVectorType>(CV->getType()))
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    if (!DemandedElts[i])
      continue;
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V,
                                       const APInt &DemandedElts,
                                       unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits of the demanded
/// elements in the vector specified by DemandedElts.
static unsigned ComputeNumSignBitsImpl(const Value *V,
                                       const APInt &DemandedElts,
                                       unsigned Depth, const Query &Q) {
  Type *Ty = V->getType();

  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(Ty))
    return 1;

#ifndef NDEBUG
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    assert(
        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
        "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars");
  }
#endif

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  Type *ScalarTy = Ty->getScalarType();
  unsigned TyBits = ScalarTy->isPointerTy() ?
    Q.DL.getPointerTypeSizeInBits(ScalarTy) :
    Q.DL.getTypeSizeInBits(ScalarTy);

  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxAnalysisRecursionDepth)
    return 1;

  if (auto *U = dyn_cast<Operator>(V)) {
    switch (Operator::getOpcode(V)) {
    default: break;
    case Instruction::SExt:
      Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
      return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

    case Instruction::SDiv: {
      const APInt *Denominator;
      // sdiv X, C -> adds log(C) sign bits.
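      // E.g. (a worked sketch): i8 values with at least 3 sign bits lie in
      // [-32, 32); sdiv by 4 yields values in [-8, 8), which have at least
      // 3 + log2(4) = 5 sign bits.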
      if (match(U->getOperand(1), m_APInt(Denominator))) {

        // Ignore non-positive denominator.
        if (!Denominator->isStrictlyPositive())
          break;

        // Calculate the incoming numerator bits.
        unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

        // Add floor(log(C)) bits to the numerator bits.
        return std::min(TyBits, NumBits + Denominator->logBase2());
      }
      break;
    }

    case Instruction::SRem: {
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      const APInt *Denominator;
      // srem X, C -> we know that the result is within [-C+1,C) when C is a
      // positive constant. This lets us put a lower bound on the number of
      // sign bits.
      if (match(U->getOperand(1), m_APInt(Denominator))) {

        // Ignore non-positive denominator.
        if (Denominator->isStrictlyPositive()) {
          // Calculate the leading sign bit constraints by examining the
          // denominator. Given that the denominator is positive, there are
          // two cases:
          //
          //  1. The numerator is positive. The result range is [0,C) and
          //     [0,C) u< (1 << ceilLogBase2(C)).
          //
          //  2. The numerator is negative. Then the result range is (-C,0]
          //     and integers in (-C,0] are either 0 or >u
          //     (-1 << ceilLogBase2(C)).
          //
          // Thus a lower bound on the number of sign bits is `TyBits -
          // ceilLogBase2(C)`.

          unsigned ResBits = TyBits - Denominator->ceilLogBase2();
          Tmp = std::max(Tmp, ResBits);
        }
      }
      return Tmp;
    }

    case Instruction::AShr: {
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      // ashr X, C -> adds C sign bits. Vectors too.
      const APInt *ShAmt;
      if (match(U->getOperand(1), m_APInt(ShAmt))) {
        if (ShAmt->uge(TyBits))
          break; // Bad shift.
        unsigned ShAmtLimited = ShAmt->getZExtValue();
        Tmp += ShAmtLimited;
        if (Tmp > TyBits) Tmp = TyBits;
      }
      return Tmp;
    }
    case Instruction::Shl: {
      const APInt *ShAmt;
      if (match(U->getOperand(1), m_APInt(ShAmt))) {
        // shl destroys sign bits.
        Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
        if (ShAmt->uge(TyBits) ||   // Bad shift.
            ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
        Tmp2 = ShAmt->getZExtValue();
        return Tmp - Tmp2;
      }
      break;
    }
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: // NOT is handled here.
      // Logical binary ops preserve the number of sign bits at the worst.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      if (Tmp != 1) {
        Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
        FirstAnswer = std::min(Tmp, Tmp2);
        // We computed what we know about the sign bits as our first
        // answer. Now proceed to the generic code that uses
        // computeKnownBits, and pick whichever answer is better.
      }
      break;

    case Instruction::Select: {
      // If we have a clamp pattern, we know that the number of sign bits will
      // be the minimum of the clamp min/max range.
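      // For example (illustrative): smax(smin(X, i32 127), i32 -128) clamps X
      // to [-128, 127]; both bounds have 25 sign bits in i32, so the select
      // has at least 25.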
3070 const Value *X; 3071 const APInt *CLow, *CHigh; 3072 if (isSignedMinMaxClamp(U, X, CLow, CHigh)) 3073 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 3074 3075 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3076 if (Tmp == 1) break; 3077 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 3078 return std::min(Tmp, Tmp2); 3079 } 3080 3081 case Instruction::Add: 3082 // Add can have at most one carry bit. Thus we know that the output 3083 // is, at worst, one more bit than the inputs. 3084 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3085 if (Tmp == 1) break; 3086 3087 // Special case decrementing a value (ADD X, -1): 3088 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 3089 if (CRHS->isAllOnesValue()) { 3090 KnownBits Known(TyBits); 3091 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 3092 3093 // If the input is known to be 0 or 1, the output is 0/-1, which is 3094 // all sign bits set. 3095 if ((Known.Zero | 1).isAllOnes()) 3096 return TyBits; 3097 3098 // If we are subtracting one from a positive number, there is no carry 3099 // out of the result. 3100 if (Known.isNonNegative()) 3101 return Tmp; 3102 } 3103 3104 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3105 if (Tmp2 == 1) break; 3106 return std::min(Tmp, Tmp2) - 1; 3107 3108 case Instruction::Sub: 3109 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3110 if (Tmp2 == 1) break; 3111 3112 // Handle NEG. 3113 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 3114 if (CLHS->isNullValue()) { 3115 KnownBits Known(TyBits); 3116 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 3117 // If the input is known to be 0 or 1, the output is 0/-1, which is 3118 // all sign bits set. 3119 if ((Known.Zero | 1).isAllOnes()) 3120 return TyBits; 3121 3122 // If the input is known to be positive (the sign bit is known clear), 3123 // the output of the NEG has the same number of sign bits as the 3124 // input. 3125 if (Known.isNonNegative()) 3126 return Tmp2; 3127 3128 // Otherwise, we treat this like a SUB. 3129 } 3130 3131 // Sub can have at most one carry bit. Thus we know that the output 3132 // is, at worst, one more bit than the inputs. 3133 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3134 if (Tmp == 1) break; 3135 return std::min(Tmp, Tmp2) - 1; 3136 3137 case Instruction::Mul: { 3138 // The output of the Mul can be at most twice the valid bits in the 3139 // inputs. 3140 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3141 if (SignBitsOp0 == 1) break; 3142 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3143 if (SignBitsOp1 == 1) break; 3144 unsigned OutValidBits = 3145 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 3146 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 3147 } 3148 3149 case Instruction::PHI: { 3150 const PHINode *PN = cast<PHINode>(U); 3151 unsigned NumIncomingValues = PN->getNumIncomingValues(); 3152 // Don't analyze large in-degree PHIs. 3153 if (NumIncomingValues > 4) break; 3154 // Unreachable blocks may have zero-operand PHI nodes. 3155 if (NumIncomingValues == 0) break; 3156 3157 // Take the minimum of all incoming values. This can't infinitely loop 3158 // because of our depth threshold. 
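      // For example (illustrative): a phi merging `ashr i32 %a, 24` (at
      // least 25 sign bits) with the constant `i32 -1` (32 sign bits) is
      // known to have at least 25 sign bits.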
3159 Query RecQ = Q; 3160 Tmp = TyBits; 3161 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { 3162 if (Tmp == 1) return Tmp; 3163 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); 3164 Tmp = std::min( 3165 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); 3166 } 3167 return Tmp; 3168 } 3169 3170 case Instruction::Trunc: 3171 // FIXME: it's tricky to do anything useful for this, but it is an 3172 // important case for targets like X86. 3173 break; 3174 3175 case Instruction::ExtractElement: 3176 // Look through extract element. At the moment we keep this simple and 3177 // skip tracking the specific element. But at least we might find 3178 // information valid for all elements of the vector (for example if vector 3179 // is sign extended, shifted, etc). 3180 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3181 3182 case Instruction::ShuffleVector: { 3183 // Collect the minimum number of sign bits that are shared by every vector 3184 // element referenced by the shuffle. 3185 auto *Shuf = dyn_cast<ShuffleVectorInst>(U); 3186 if (!Shuf) { 3187 // FIXME: Add support for shufflevector constant expressions. 3188 return 1; 3189 } 3190 APInt DemandedLHS, DemandedRHS; 3191 // For undef elements, we don't know anything about the common state of 3192 // the shuffle result. 3193 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) 3194 return 1; 3195 Tmp = std::numeric_limits<unsigned>::max(); 3196 if (!!DemandedLHS) { 3197 const Value *LHS = Shuf->getOperand(0); 3198 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); 3199 } 3200 // If we don't know anything, early out and try computeKnownBits 3201 // fall-back. 3202 if (Tmp == 1) 3203 break; 3204 if (!!DemandedRHS) { 3205 const Value *RHS = Shuf->getOperand(1); 3206 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); 3207 Tmp = std::min(Tmp, Tmp2); 3208 } 3209 // If we don't know anything, early out and try computeKnownBits 3210 // fall-back. 3211 if (Tmp == 1) 3212 break; 3213 assert(Tmp <= TyBits && "Failed to determine minimum sign bits"); 3214 return Tmp; 3215 } 3216 case Instruction::Call: { 3217 if (const auto *II = dyn_cast<IntrinsicInst>(U)) { 3218 switch (II->getIntrinsicID()) { 3219 default: break; 3220 case Intrinsic::abs: 3221 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3222 if (Tmp == 1) break; 3223 3224 // Absolute value reduces number of sign bits by at most 1. 3225 return Tmp - 1; 3226 } 3227 } 3228 } 3229 } 3230 } 3231 3232 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3233 // use this information. 3234 3235 // If we can examine all elements of a vector constant successfully, we're 3236 // done (we can't do any better than that). If not, keep trying. 3237 if (unsigned VecSignBits = 3238 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) 3239 return VecSignBits; 3240 3241 KnownBits Known(TyBits); 3242 computeKnownBits(V, DemandedElts, Known, Depth, Q); 3243 3244 // If we know that the sign bit is either zero or one, determine the number of 3245 // identical bits in the top of the input value. 3246 return std::max(FirstAnswer, Known.countMinSignBits()); 3247 } 3248 3249 /// This function computes the integer multiple of Base that equals V. 3250 /// If successful, it returns true and returns the multiple in 3251 /// Multiple. If unsuccessful, it returns false. It looks 3252 /// through SExt instructions only if LookThroughSExt is true. 
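/// For example (illustrative): for V = i32 12 and Base == 3 this succeeds
/// with Multiple == i32 4; for V = shl i32 %x, 2 and Base == 4 it succeeds
/// with Multiple == %x.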
3253 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 3254 bool LookThroughSExt, unsigned Depth) { 3255 assert(V && "No Value?"); 3256 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 3257 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 3258 3259 Type *T = V->getType(); 3260 3261 ConstantInt *CI = dyn_cast<ConstantInt>(V); 3262 3263 if (Base == 0) 3264 return false; 3265 3266 if (Base == 1) { 3267 Multiple = V; 3268 return true; 3269 } 3270 3271 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 3272 Constant *BaseVal = ConstantInt::get(T, Base); 3273 if (CO && CO == BaseVal) { 3274 // Multiple is 1. 3275 Multiple = ConstantInt::get(T, 1); 3276 return true; 3277 } 3278 3279 if (CI && CI->getZExtValue() % Base == 0) { 3280 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 3281 return true; 3282 } 3283 3284 if (Depth == MaxAnalysisRecursionDepth) return false; 3285 3286 Operator *I = dyn_cast<Operator>(V); 3287 if (!I) return false; 3288 3289 switch (I->getOpcode()) { 3290 default: break; 3291 case Instruction::SExt: 3292 if (!LookThroughSExt) return false; 3293 // otherwise fall through to ZExt 3294 LLVM_FALLTHROUGH; 3295 case Instruction::ZExt: 3296 return ComputeMultiple(I->getOperand(0), Base, Multiple, 3297 LookThroughSExt, Depth+1); 3298 case Instruction::Shl: 3299 case Instruction::Mul: { 3300 Value *Op0 = I->getOperand(0); 3301 Value *Op1 = I->getOperand(1); 3302 3303 if (I->getOpcode() == Instruction::Shl) { 3304 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 3305 if (!Op1CI) return false; 3306 // Turn Op0 << Op1 into Op0 * 2^Op1 3307 APInt Op1Int = Op1CI->getValue(); 3308 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 3309 APInt API(Op1Int.getBitWidth(), 0); 3310 API.setBit(BitToSet); 3311 Op1 = ConstantInt::get(V->getContext(), API); 3312 } 3313 3314 Value *Mul0 = nullptr; 3315 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 3316 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 3317 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 3318 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() < 3319 MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) 3320 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 3321 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() > 3322 MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) 3323 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 3324 3325 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 3326 Multiple = ConstantExpr::getMul(MulC, Op1C); 3327 return true; 3328 } 3329 3330 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 3331 if (Mul0CI->getValue() == 1) { 3332 // V == Base * Op1, so return Op1 3333 Multiple = Op1; 3334 return true; 3335 } 3336 } 3337 3338 Value *Mul1 = nullptr; 3339 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 3340 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 3341 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 3342 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() < 3343 MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) 3344 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 3345 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() > 3346 MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) 3347 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 3348 3349 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 3350 Multiple = ConstantExpr::getMul(MulC, Op0C); 3351 return true; 3352 } 3353 3354 if (ConstantInt *Mul1CI = 
dyn_cast<ConstantInt>(Mul1)) 3355 if (Mul1CI->getValue() == 1) { 3356 // V == Base * Op0, so return Op0 3357 Multiple = Op0; 3358 return true; 3359 } 3360 } 3361 } 3362 } 3363 3364 // We could not determine if V is a multiple of Base. 3365 return false; 3366 } 3367 3368 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, 3369 const TargetLibraryInfo *TLI) { 3370 const Function *F = CB.getCalledFunction(); 3371 if (!F) 3372 return Intrinsic::not_intrinsic; 3373 3374 if (F->isIntrinsic()) 3375 return F->getIntrinsicID(); 3376 3377 // We are going to infer semantics of a library function based on mapping it 3378 // to an LLVM intrinsic. Check that the library function is available from 3379 // this callbase and in this environment. 3380 LibFunc Func; 3381 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || 3382 !CB.onlyReadsMemory()) 3383 return Intrinsic::not_intrinsic; 3384 3385 switch (Func) { 3386 default: 3387 break; 3388 case LibFunc_sin: 3389 case LibFunc_sinf: 3390 case LibFunc_sinl: 3391 return Intrinsic::sin; 3392 case LibFunc_cos: 3393 case LibFunc_cosf: 3394 case LibFunc_cosl: 3395 return Intrinsic::cos; 3396 case LibFunc_exp: 3397 case LibFunc_expf: 3398 case LibFunc_expl: 3399 return Intrinsic::exp; 3400 case LibFunc_exp2: 3401 case LibFunc_exp2f: 3402 case LibFunc_exp2l: 3403 return Intrinsic::exp2; 3404 case LibFunc_log: 3405 case LibFunc_logf: 3406 case LibFunc_logl: 3407 return Intrinsic::log; 3408 case LibFunc_log10: 3409 case LibFunc_log10f: 3410 case LibFunc_log10l: 3411 return Intrinsic::log10; 3412 case LibFunc_log2: 3413 case LibFunc_log2f: 3414 case LibFunc_log2l: 3415 return Intrinsic::log2; 3416 case LibFunc_fabs: 3417 case LibFunc_fabsf: 3418 case LibFunc_fabsl: 3419 return Intrinsic::fabs; 3420 case LibFunc_fmin: 3421 case LibFunc_fminf: 3422 case LibFunc_fminl: 3423 return Intrinsic::minnum; 3424 case LibFunc_fmax: 3425 case LibFunc_fmaxf: 3426 case LibFunc_fmaxl: 3427 return Intrinsic::maxnum; 3428 case LibFunc_copysign: 3429 case LibFunc_copysignf: 3430 case LibFunc_copysignl: 3431 return Intrinsic::copysign; 3432 case LibFunc_floor: 3433 case LibFunc_floorf: 3434 case LibFunc_floorl: 3435 return Intrinsic::floor; 3436 case LibFunc_ceil: 3437 case LibFunc_ceilf: 3438 case LibFunc_ceill: 3439 return Intrinsic::ceil; 3440 case LibFunc_trunc: 3441 case LibFunc_truncf: 3442 case LibFunc_truncl: 3443 return Intrinsic::trunc; 3444 case LibFunc_rint: 3445 case LibFunc_rintf: 3446 case LibFunc_rintl: 3447 return Intrinsic::rint; 3448 case LibFunc_nearbyint: 3449 case LibFunc_nearbyintf: 3450 case LibFunc_nearbyintl: 3451 return Intrinsic::nearbyint; 3452 case LibFunc_round: 3453 case LibFunc_roundf: 3454 case LibFunc_roundl: 3455 return Intrinsic::round; 3456 case LibFunc_roundeven: 3457 case LibFunc_roundevenf: 3458 case LibFunc_roundevenl: 3459 return Intrinsic::roundeven; 3460 case LibFunc_pow: 3461 case LibFunc_powf: 3462 case LibFunc_powl: 3463 return Intrinsic::pow; 3464 case LibFunc_sqrt: 3465 case LibFunc_sqrtf: 3466 case LibFunc_sqrtl: 3467 return Intrinsic::sqrt; 3468 } 3469 3470 return Intrinsic::not_intrinsic; 3471 } 3472 3473 /// Return true if we can prove that the specified FP value is never equal to 3474 /// -0.0. 3475 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee 3476 /// that a value is not -0.0. It only guarantees that -0.0 may be treated 3477 /// the same as +0.0 in floating-point ops. 
3478 /// 3479 /// NOTE: this function will need to be revisited when we support non-default 3480 /// rounding modes! 3481 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 3482 unsigned Depth) { 3483 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3484 return !CFP->getValueAPF().isNegZero(); 3485 3486 if (Depth == MaxAnalysisRecursionDepth) 3487 return false; 3488 3489 auto *Op = dyn_cast<Operator>(V); 3490 if (!Op) 3491 return false; 3492 3493 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 3494 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 3495 return true; 3496 3497 // sitofp and uitofp turn into +0.0 for zero. 3498 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 3499 return true; 3500 3501 if (auto *Call = dyn_cast<CallInst>(Op)) { 3502 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI); 3503 switch (IID) { 3504 default: 3505 break; 3506 // sqrt(-0.0) = -0.0, no other negative results are possible. 3507 case Intrinsic::sqrt: 3508 case Intrinsic::canonicalize: 3509 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); 3510 // fabs(x) != -0.0 3511 case Intrinsic::fabs: 3512 return true; 3513 } 3514 } 3515 3516 return false; 3517 } 3518 3519 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 3520 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 3521 /// bit despite comparing equal. 3522 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 3523 const TargetLibraryInfo *TLI, 3524 bool SignBitOnly, 3525 unsigned Depth) { 3526 // TODO: This function does not do the right thing when SignBitOnly is true 3527 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 3528 // which flips the sign bits of NaNs. See 3529 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3530 3531 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 3532 return !CFP->getValueAPF().isNegative() || 3533 (!SignBitOnly && CFP->getValueAPF().isZero()); 3534 } 3535 3536 // Handle vector of constants. 3537 if (auto *CV = dyn_cast<Constant>(V)) { 3538 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) { 3539 unsigned NumElts = CVFVTy->getNumElements(); 3540 for (unsigned i = 0; i != NumElts; ++i) { 3541 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 3542 if (!CFP) 3543 return false; 3544 if (CFP->getValueAPF().isNegative() && 3545 (SignBitOnly || !CFP->getValueAPF().isZero())) 3546 return false; 3547 } 3548 3549 // All non-negative ConstantFPs. 3550 return true; 3551 } 3552 } 3553 3554 if (Depth == MaxAnalysisRecursionDepth) 3555 return false; 3556 3557 const Operator *I = dyn_cast<Operator>(V); 3558 if (!I) 3559 return false; 3560 3561 switch (I->getOpcode()) { 3562 default: 3563 break; 3564 // Unsigned integers are always nonnegative. 3565 case Instruction::UIToFP: 3566 return true; 3567 case Instruction::FMul: 3568 case Instruction::FDiv: 3569 // X * X is always non-negative or a NaN. 3570 // X / X is always exactly 1.0 or a NaN. 
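      // (Illustrative: if X == 0.0, then X / X is NaN, whose sign bit is
      // unspecified; that is why SignBitOnly additionally requires nnan
      // below.)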
3571 if (I->getOperand(0) == I->getOperand(1) && 3572 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 3573 return true; 3574 3575 LLVM_FALLTHROUGH; 3576 case Instruction::FAdd: 3577 case Instruction::FRem: 3578 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3579 Depth + 1) && 3580 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3581 Depth + 1); 3582 case Instruction::Select: 3583 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3584 Depth + 1) && 3585 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3586 Depth + 1); 3587 case Instruction::FPExt: 3588 case Instruction::FPTrunc: 3589 // Widening/narrowing never change sign. 3590 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3591 Depth + 1); 3592 case Instruction::ExtractElement: 3593 // Look through extract element. At the moment we keep this simple and skip 3594 // tracking the specific element. But at least we might find information 3595 // valid for all elements of the vector. 3596 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3597 Depth + 1); 3598 case Instruction::Call: 3599 const auto *CI = cast<CallInst>(I); 3600 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); 3601 switch (IID) { 3602 default: 3603 break; 3604 case Intrinsic::maxnum: { 3605 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); 3606 auto isPositiveNum = [&](Value *V) { 3607 if (SignBitOnly) { 3608 // With SignBitOnly, this is tricky because the result of 3609 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is 3610 // a constant strictly greater than 0.0. 3611 const APFloat *C; 3612 return match(V, m_APFloat(C)) && 3613 *C > APFloat::getZero(C->getSemantics()); 3614 } 3615 3616 // -0.0 compares equal to 0.0, so if this operand is at least -0.0, 3617 // maxnum can't be ordered-less-than-zero. 3618 return isKnownNeverNaN(V, TLI) && 3619 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1); 3620 }; 3621 3622 // TODO: This could be improved. We could also check that neither operand 3623 // has its sign bit set (and at least 1 is not-NAN?). 3624 return isPositiveNum(V0) || isPositiveNum(V1); 3625 } 3626 3627 case Intrinsic::maximum: 3628 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3629 Depth + 1) || 3630 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3631 Depth + 1); 3632 case Intrinsic::minnum: 3633 case Intrinsic::minimum: 3634 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3635 Depth + 1) && 3636 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3637 Depth + 1); 3638 case Intrinsic::exp: 3639 case Intrinsic::exp2: 3640 case Intrinsic::fabs: 3641 return true; 3642 3643 case Intrinsic::sqrt: 3644 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 3645 if (!SignBitOnly) 3646 return true; 3647 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 3648 CannotBeNegativeZero(CI->getOperand(0), TLI)); 3649 3650 case Intrinsic::powi: 3651 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3652 // powi(x,n) is non-negative if n is even. 3653 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3654 return true; 3655 } 3656 // TODO: This is not correct. Given that exp is an integer, here are the 3657 // ways that pow can return a negative value: 3658 // 3659 // pow(x, exp) --> negative if exp is odd and x is negative. 
3660 // pow(-0, exp) --> -inf if exp is negative odd. 3661 // pow(-0, exp) --> -0 if exp is positive odd. 3662 // pow(-inf, exp) --> -0 if exp is negative odd. 3663 // pow(-inf, exp) --> -inf if exp is positive odd. 3664 // 3665 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3666 // but we must return false if x == -0. Unfortunately we do not currently 3667 // have a way of expressing this constraint. See details in 3668 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3669 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3670 Depth + 1); 3671 3672 case Intrinsic::fma: 3673 case Intrinsic::fmuladd: 3674 // x*x+y is non-negative if y is non-negative. 3675 return I->getOperand(0) == I->getOperand(1) && 3676 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3677 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3678 Depth + 1); 3679 } 3680 break; 3681 } 3682 return false; 3683 } 3684 3685 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 3686 const TargetLibraryInfo *TLI) { 3687 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 3688 } 3689 3690 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 3691 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 3692 } 3693 3694 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, 3695 unsigned Depth) { 3696 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type"); 3697 3698 // If we're told that infinities won't happen, assume they won't. 3699 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3700 if (FPMathOp->hasNoInfs()) 3701 return true; 3702 3703 // Handle scalar constants. 3704 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3705 return !CFP->isInfinity(); 3706 3707 if (Depth == MaxAnalysisRecursionDepth) 3708 return false; 3709 3710 if (auto *Inst = dyn_cast<Instruction>(V)) { 3711 switch (Inst->getOpcode()) { 3712 case Instruction::Select: { 3713 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) && 3714 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1); 3715 } 3716 case Instruction::SIToFP: 3717 case Instruction::UIToFP: { 3718 // Get width of largest magnitude integer (remove a bit if signed). 3719 // This still works for a signed minimum value because the largest FP 3720 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). 3721 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits(); 3722 if (Inst->getOpcode() == Instruction::SIToFP) 3723 --IntSize; 3724 3725 // If the exponent of the largest finite FP value can hold the largest 3726 // integer, the result of the cast must be finite. 3727 Type *FPTy = Inst->getType()->getScalarType(); 3728 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize; 3729 } 3730 default: 3731 break; 3732 } 3733 } 3734 3735 // try to handle fixed width vector constants 3736 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); 3737 if (VFVTy && isa<Constant>(V)) { 3738 // For vectors, verify that each element is not infinity. 3739 unsigned NumElts = VFVTy->getNumElements(); 3740 for (unsigned i = 0; i != NumElts; ++i) { 3741 Constant *Elt = cast<Constant>(V)->getAggregateElement(i); 3742 if (!Elt) 3743 return false; 3744 if (isa<UndefValue>(Elt)) 3745 continue; 3746 auto *CElt = dyn_cast<ConstantFP>(Elt); 3747 if (!CElt || CElt->isInfinity()) 3748 return false; 3749 } 3750 // All elements were confirmed non-infinity or undefined. 
3751 return true; 3752 } 3753 3754 // was not able to prove that V never contains infinity 3755 return false; 3756 } 3757 3758 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, 3759 unsigned Depth) { 3760 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); 3761 3762 // If we're told that NaNs won't happen, assume they won't. 3763 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3764 if (FPMathOp->hasNoNaNs()) 3765 return true; 3766 3767 // Handle scalar constants. 3768 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3769 return !CFP->isNaN(); 3770 3771 if (Depth == MaxAnalysisRecursionDepth) 3772 return false; 3773 3774 if (auto *Inst = dyn_cast<Instruction>(V)) { 3775 switch (Inst->getOpcode()) { 3776 case Instruction::FAdd: 3777 case Instruction::FSub: 3778 // Adding positive and negative infinity produces NaN. 3779 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && 3780 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3781 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) || 3782 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1)); 3783 3784 case Instruction::FMul: 3785 // Zero multiplied with infinity produces NaN. 3786 // FIXME: If neither side can be zero fmul never produces NaN. 3787 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && 3788 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && 3789 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3790 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); 3791 3792 case Instruction::FDiv: 3793 case Instruction::FRem: 3794 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN. 3795 return false; 3796 3797 case Instruction::Select: { 3798 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3799 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); 3800 } 3801 case Instruction::SIToFP: 3802 case Instruction::UIToFP: 3803 return true; 3804 case Instruction::FPTrunc: 3805 case Instruction::FPExt: 3806 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); 3807 default: 3808 break; 3809 } 3810 } 3811 3812 if (const auto *II = dyn_cast<IntrinsicInst>(V)) { 3813 switch (II->getIntrinsicID()) { 3814 case Intrinsic::canonicalize: 3815 case Intrinsic::fabs: 3816 case Intrinsic::copysign: 3817 case Intrinsic::exp: 3818 case Intrinsic::exp2: 3819 case Intrinsic::floor: 3820 case Intrinsic::ceil: 3821 case Intrinsic::trunc: 3822 case Intrinsic::rint: 3823 case Intrinsic::nearbyint: 3824 case Intrinsic::round: 3825 case Intrinsic::roundeven: 3826 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); 3827 case Intrinsic::sqrt: 3828 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && 3829 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); 3830 case Intrinsic::minnum: 3831 case Intrinsic::maxnum: 3832 // If either operand is not NaN, the result is not NaN. 3833 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) || 3834 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1); 3835 default: 3836 return false; 3837 } 3838 } 3839 3840 // Try to handle fixed width vector constants 3841 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); 3842 if (VFVTy && isa<Constant>(V)) { 3843 // For vectors, verify that each element is not NaN. 
    unsigned NumElts = VFVTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
      if (!Elt)
        return false;
      if (isa<UndefValue>(Elt))
        continue;
      auto *CElt = dyn_cast<ConstantFP>(Elt);
      if (!CElt || CElt->isNaN())
        return false;
    }
    // All elements were confirmed not-NaN or undefined.
    return true;
  }

  // Was not able to prove that V never contains NaN.
  return false;
}

Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8))
    return V;

  LLVMContext &Ctx = V->getContext();

  // Undef can be treated as any byte value, so it doesn't constrain the splat.
  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
  if (isa<UndefValue>(V))
    return UndefInt8;

  // Return Undef for zero-sized types.
  if (!DL.getTypeStoreSize(V->getType()).isNonZero())
    return UndefInt8;

  Constant *C = dyn_cast<Constant>(V);
  if (!C) {
    // Conceptually, we could handle things like:
    //   %a = zext i8 %X to i16
    //   %b = shl i16 %a, 8
    //   %c = or i16 %a, %b
    // but until there is an example that actually needs this, it doesn't seem
    // worth worrying about.
    return nullptr;
  }

  // Handle 'null' ConstantAggregateZero etc.
  if (C->isNullValue())
    return Constant::getNullValue(Type::getInt8Ty(Ctx));

  // Constant floating-point values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    Type *Ty = nullptr;
    if (CFP->getType()->isHalfTy())
      Ty = Type::getInt16Ty(Ctx);
    else if (CFP->getType()->isFloatTy())
      Ty = Type::getInt32Ty(Ctx);
    else if (CFP->getType()->isDoubleTy())
      Ty = Type::getInt64Ty(Ctx);
    // Don't handle long double formats, which have strange constraints.
    return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
              : nullptr;
  }

  // We can handle constant integers that are a multiple of 8 bits wide.
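  // For example (illustrative): i16 0xABAB and i32 0x01010101 splat to the
  // bytes 0xAB and 0x01 respectively, while i32 0x01020304 is not a byte
  // splat and yields nullptr.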
3911 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { 3912 if (CI->getBitWidth() % 8 == 0) { 3913 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 3914 if (!CI->getValue().isSplat(8)) 3915 return nullptr; 3916 return ConstantInt::get(Ctx, CI->getValue().trunc(8)); 3917 } 3918 } 3919 3920 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 3921 if (CE->getOpcode() == Instruction::IntToPtr) { 3922 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { 3923 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); 3924 return isBytewiseValue( 3925 ConstantExpr::getIntegerCast(CE->getOperand(0), 3926 Type::getIntNTy(Ctx, BitWidth), false), 3927 DL); 3928 } 3929 } 3930 } 3931 3932 auto Merge = [&](Value *LHS, Value *RHS) -> Value * { 3933 if (LHS == RHS) 3934 return LHS; 3935 if (!LHS || !RHS) 3936 return nullptr; 3937 if (LHS == UndefInt8) 3938 return RHS; 3939 if (RHS == UndefInt8) 3940 return LHS; 3941 return nullptr; 3942 }; 3943 3944 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { 3945 Value *Val = UndefInt8; 3946 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) 3947 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) 3948 return nullptr; 3949 return Val; 3950 } 3951 3952 if (isa<ConstantAggregate>(C)) { 3953 Value *Val = UndefInt8; 3954 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) 3955 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) 3956 return nullptr; 3957 return Val; 3958 } 3959 3960 // Don't try to handle the handful of other constants. 3961 return nullptr; 3962 } 3963 3964 // This is the recursive version of BuildSubAggregate. It takes a few different 3965 // arguments. Idxs is the index within the nested struct From that we are 3966 // looking at now (which is of type IndexedType). IdxSkip is the number of 3967 // indices from Idxs that should be left out when inserting into the resulting 3968 // struct. To is the result struct built so far, new insertvalue instructions 3969 // build on that. 3970 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 3971 SmallVectorImpl<unsigned> &Idxs, 3972 unsigned IdxSkip, 3973 Instruction *InsertBefore) { 3974 StructType *STy = dyn_cast<StructType>(IndexedType); 3975 if (STy) { 3976 // Save the original To argument so we can modify it 3977 Value *OrigTo = To; 3978 // General case, the type indexed by Idxs is a struct 3979 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3980 // Process each struct element recursively 3981 Idxs.push_back(i); 3982 Value *PrevTo = To; 3983 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 3984 InsertBefore); 3985 Idxs.pop_back(); 3986 if (!To) { 3987 // Couldn't find any inserted value for this index? Cleanup 3988 while (PrevTo != OrigTo) { 3989 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 3990 PrevTo = Del->getAggregateOperand(); 3991 Del->eraseFromParent(); 3992 } 3993 // Stop processing elements 3994 break; 3995 } 3996 } 3997 // If we successfully found a value for each of our subaggregates 3998 if (To) 3999 return To; 4000 } 4001 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 4002 // the struct's elements had a value that was inserted directly. In the latter 4003 // case, perhaps we can't determine each of the subelements individually, but 4004 // we might be able to find the complete struct somewhere. 
4005 4006 // Find the value that is at that particular spot 4007 Value *V = FindInsertedValue(From, Idxs); 4008 4009 if (!V) 4010 return nullptr; 4011 4012 // Insert the value in the new (sub) aggregate 4013 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 4014 "tmp", InsertBefore); 4015 } 4016 4017 // This helper takes a nested struct and extracts a part of it (which is again a 4018 // struct) into a new value. For example, given the struct: 4019 // { a, { b, { c, d }, e } } 4020 // and the indices "1, 1" this returns 4021 // { c, d }. 4022 // 4023 // It does this by inserting an insertvalue for each element in the resulting 4024 // struct, as opposed to just inserting a single struct. This will only work if 4025 // each of the elements of the substruct are known (ie, inserted into From by an 4026 // insertvalue instruction somewhere). 4027 // 4028 // All inserted insertvalue instructions are inserted before InsertBefore 4029 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 4030 Instruction *InsertBefore) { 4031 assert(InsertBefore && "Must have someplace to insert!"); 4032 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 4033 idx_range); 4034 Value *To = UndefValue::get(IndexedType); 4035 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 4036 unsigned IdxSkip = Idxs.size(); 4037 4038 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 4039 } 4040 4041 /// Given an aggregate and a sequence of indices, see if the scalar value 4042 /// indexed is already around as a register, for example if it was inserted 4043 /// directly into the aggregate. 4044 /// 4045 /// If InsertBefore is not null, this function will duplicate (modified) 4046 /// insertvalues when a part of a nested struct is extracted. 4047 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 4048 Instruction *InsertBefore) { 4049 // Nothing to index? Just return V then (this is useful at the end of our 4050 // recursion). 4051 if (idx_range.empty()) 4052 return V; 4053 // We have indices, so V should have an indexable type. 4054 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 4055 "Not looking at a struct or array?"); 4056 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 4057 "Invalid indices for type?"); 4058 4059 if (Constant *C = dyn_cast<Constant>(V)) { 4060 C = C->getAggregateElement(idx_range[0]); 4061 if (!C) return nullptr; 4062 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 4063 } 4064 4065 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 4066 // Loop the indices for the insertvalue instruction in parallel with the 4067 // requested indices 4068 const unsigned *req_idx = idx_range.begin(); 4069 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 4070 i != e; ++i, ++req_idx) { 4071 if (req_idx == idx_range.end()) { 4072 // We can't handle this without inserting insertvalues 4073 if (!InsertBefore) 4074 return nullptr; 4075 4076 // The requested index identifies a part of a nested aggregate. Handle 4077 // this specially. 
For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insert value inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as extracting from a function return
  // value or a load instruction).
  return nullptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to a string constant,
    // and is indexing into the string constant.
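    // For example (illustrative; @s is a hypothetical global): the GEP
    //   getelementptr inbounds [6 x i8], [6 x i8]* @s, i64 0, i64 2
    // passes the check below and starts the slice at offset 2 within @s's
    // initializer.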
4165 if (!isGEPBasedOnPointerToString(GEP, ElementSize)) 4166 return false; 4167 4168 // If the second index isn't a ConstantInt, then this is a variable index 4169 // into the array. If this occurs, we can't say anything meaningful about 4170 // the string. 4171 uint64_t StartIdx = 0; 4172 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 4173 StartIdx = CI->getZExtValue(); 4174 else 4175 return false; 4176 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize, 4177 StartIdx + Offset); 4178 } 4179 4180 // The GEP instruction, constant or instruction, must reference a global 4181 // variable that is a constant and is initialized. The referenced constant 4182 // initializer is the array that we'll use for optimization. 4183 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 4184 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 4185 return false; 4186 4187 const ConstantDataArray *Array; 4188 ArrayType *ArrayTy; 4189 if (GV->getInitializer()->isNullValue()) { 4190 Type *GVTy = GV->getValueType(); 4191 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) { 4192 // A zeroinitializer for the array; there is no ConstantDataArray. 4193 Array = nullptr; 4194 } else { 4195 const DataLayout &DL = GV->getParent()->getDataLayout(); 4196 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize(); 4197 uint64_t Length = SizeInBytes / (ElementSize / 8); 4198 if (Length <= Offset) 4199 return false; 4200 4201 Slice.Array = nullptr; 4202 Slice.Offset = 0; 4203 Slice.Length = Length - Offset; 4204 return true; 4205 } 4206 } else { 4207 // This must be a ConstantDataArray. 4208 Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); 4209 if (!Array) 4210 return false; 4211 ArrayTy = Array->getType(); 4212 } 4213 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize)) 4214 return false; 4215 4216 uint64_t NumElts = ArrayTy->getArrayNumElements(); 4217 if (Offset > NumElts) 4218 return false; 4219 4220 Slice.Array = Array; 4221 Slice.Offset = Offset; 4222 Slice.Length = NumElts - Offset; 4223 return true; 4224 } 4225 4226 /// This function computes the length of a null-terminated C string pointed to 4227 /// by V. If successful, it returns true and returns the string in Str. 4228 /// If unsuccessful, it returns false. 4229 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 4230 uint64_t Offset, bool TrimAtNul) { 4231 ConstantDataArraySlice Slice; 4232 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 4233 return false; 4234 4235 if (Slice.Array == nullptr) { 4236 if (TrimAtNul) { 4237 Str = StringRef(); 4238 return true; 4239 } 4240 if (Slice.Length == 1) { 4241 Str = StringRef("", 1); 4242 return true; 4243 } 4244 // We cannot instantiate a StringRef as we do not have an appropriate string 4245 // of 0s at hand. 4246 return false; 4247 } 4248 4249 // Start out with the entire array in the StringRef. 4250 Str = Slice.Array->getAsString(); 4251 // Skip over 'offset' bytes. 4252 Str = Str.substr(Slice.Offset); 4253 4254 if (TrimAtNul) { 4255 // Trim off the \0 and anything after it. If the array is not nul 4256 // terminated, we just return the whole end of string. The client may know 4257 // some other way that the string is length-bound. 4258 Str = Str.substr(0, Str.find('\0')); 4259 } 4260 return true; 4261 } 4262 4263 // These next two are very similar to the above, but also look through PHI 4264 // nodes. 4265 // TODO: See if we can integrate these two together. 
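// For example (illustrative; @s is a hypothetical global): given
// @s = private constant [6 x i8] c"hello\00", both helpers below return 6,
// i.e. strlen("hello") + 1.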
/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    return 1;

  // Search for the first nul character.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy())
    return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}

const Value *
llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                           bool MustPreserveNullness) {
  assert(Call &&
         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
  if (const Value *RV = Call->getReturnedArgOperand())
    return RV;
  // This can be used only as an aliasing property.
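  // For example (illustrative):
  //   %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  // returns a pointer that aliases %p even though %p is not marked
  // 'returned', so callers can treat %p as the underlying object of %q.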
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
          Call, MustPreserveNullness))
    return Call->getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness) {
  switch (Call->getIntrinsicID()) {
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::aarch64_irg:
  case Intrinsic::aarch64_tagp:
    return true;
  case Intrinsic::ptrmask:
    return !MustPreserveNullness;
  default:
    return false;
  }
}

/// \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a
  // different object in every iteration. E.g.:
  //    for (i)
  //       int *p = a[i];
  //       ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}

const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
      if (!V->getType()->isPointerTy())
        return V;
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else {
      if (auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics like launder.invariant.group that cannot be expressed
        // with attributes: such a call returns a pointer aliasing its
        // argument even though the argument is not marked 'returned'.
        // Because an analysis may assume that a nocapture pointer is never
        // returned from such a call, it is crucial to use this helper and
        // stay in sync with CaptureTracking. Not using it can cause
        // miscompiles where two aliasing pointers are wrongly assumed to
        // not alias.
4429 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { 4430 V = RP; 4431 continue; 4432 } 4433 } 4434 4435 return V; 4436 } 4437 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 4438 } 4439 return V; 4440 } 4441 4442 void llvm::getUnderlyingObjects(const Value *V, 4443 SmallVectorImpl<const Value *> &Objects, 4444 LoopInfo *LI, unsigned MaxLookup) { 4445 SmallPtrSet<const Value *, 4> Visited; 4446 SmallVector<const Value *, 4> Worklist; 4447 Worklist.push_back(V); 4448 do { 4449 const Value *P = Worklist.pop_back_val(); 4450 P = getUnderlyingObject(P, MaxLookup); 4451 4452 if (!Visited.insert(P).second) 4453 continue; 4454 4455 if (auto *SI = dyn_cast<SelectInst>(P)) { 4456 Worklist.push_back(SI->getTrueValue()); 4457 Worklist.push_back(SI->getFalseValue()); 4458 continue; 4459 } 4460 4461 if (auto *PN = dyn_cast<PHINode>(P)) { 4462 // If this PHI changes the underlying object in every iteration of the 4463 // loop, don't look through it. Consider: 4464 // int **A; 4465 // for (i) { 4466 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 4467 // Curr = A[i]; 4468 // *Prev, *Curr; 4469 // 4470 // Prev is tracking Curr one iteration behind so they refer to different 4471 // underlying objects. 4472 if (!LI || !LI->isLoopHeader(PN->getParent()) || 4473 isSameUnderlyingObjectInLoop(PN, LI)) 4474 append_range(Worklist, PN->incoming_values()); 4475 continue; 4476 } 4477 4478 Objects.push_back(P); 4479 } while (!Worklist.empty()); 4480 } 4481 4482 /// This is the function that does the work of looking through basic 4483 /// ptrtoint+arithmetic+inttoptr sequences. 4484 static const Value *getUnderlyingObjectFromInt(const Value *V) { 4485 do { 4486 if (const Operator *U = dyn_cast<Operator>(V)) { 4487 // If we find a ptrtoint, we can transfer control back to the 4488 // regular getUnderlyingObjectFromInt. 4489 if (U->getOpcode() == Instruction::PtrToInt) 4490 return U->getOperand(0); 4491 // If we find an add of a constant, a multiplied value, or a phi, it's 4492 // likely that the other operand will lead us to the base 4493 // object. We don't have to worry about the case where the 4494 // object address is somehow being computed by the multiply, 4495 // because our callers only care when the result is an 4496 // identifiable object. 4497 if (U->getOpcode() != Instruction::Add || 4498 (!isa<ConstantInt>(U->getOperand(1)) && 4499 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && 4500 !isa<PHINode>(U->getOperand(1)))) 4501 return V; 4502 V = U->getOperand(0); 4503 } else { 4504 return V; 4505 } 4506 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); 4507 } while (true); 4508 } 4509 4510 /// This is a wrapper around getUnderlyingObjects and adds support for basic 4511 /// ptrtoint+arithmetic+inttoptr sequences. 4512 /// It returns false if unidentified object is found in getUnderlyingObjects. 
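/// For example (illustrative; @g is a hypothetical global): given
///   %i = ptrtoint i8* @g to i64
///   %a = add i64 %i, 16
///   %p = inttoptr i64 %a to i8*
/// this traces %p back to the identified object @g.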
4513 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, 4514 SmallVectorImpl<Value *> &Objects) { 4515 SmallPtrSet<const Value *, 16> Visited; 4516 SmallVector<const Value *, 4> Working(1, V); 4517 do { 4518 V = Working.pop_back_val(); 4519 4520 SmallVector<const Value *, 4> Objs; 4521 getUnderlyingObjects(V, Objs); 4522 4523 for (const Value *V : Objs) { 4524 if (!Visited.insert(V).second) 4525 continue; 4526 if (Operator::getOpcode(V) == Instruction::IntToPtr) { 4527 const Value *O = 4528 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); 4529 if (O->getType()->isPointerTy()) { 4530 Working.push_back(O); 4531 continue; 4532 } 4533 } 4534 // If getUnderlyingObjects fails to find an identifiable object, 4535 // getUnderlyingObjectsForCodeGen also fails for safety. 4536 if (!isIdentifiedObject(V)) { 4537 Objects.clear(); 4538 return false; 4539 } 4540 Objects.push_back(const_cast<Value *>(V)); 4541 } 4542 } while (!Working.empty()); 4543 return true; 4544 } 4545 4546 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) { 4547 AllocaInst *Result = nullptr; 4548 SmallPtrSet<Value *, 4> Visited; 4549 SmallVector<Value *, 4> Worklist; 4550 4551 auto AddWork = [&](Value *V) { 4552 if (Visited.insert(V).second) 4553 Worklist.push_back(V); 4554 }; 4555 4556 AddWork(V); 4557 do { 4558 V = Worklist.pop_back_val(); 4559 assert(Visited.count(V)); 4560 4561 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 4562 if (Result && Result != AI) 4563 return nullptr; 4564 Result = AI; 4565 } else if (CastInst *CI = dyn_cast<CastInst>(V)) { 4566 AddWork(CI->getOperand(0)); 4567 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 4568 for (Value *IncValue : PN->incoming_values()) 4569 AddWork(IncValue); 4570 } else if (auto *SI = dyn_cast<SelectInst>(V)) { 4571 AddWork(SI->getTrueValue()); 4572 AddWork(SI->getFalseValue()); 4573 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { 4574 if (OffsetZero && !GEP->hasAllZeroIndices()) 4575 return nullptr; 4576 AddWork(GEP->getPointerOperand()); 4577 } else if (CallBase *CB = dyn_cast<CallBase>(V)) { 4578 Value *Returned = CB->getReturnedArgOperand(); 4579 if (Returned) 4580 AddWork(Returned); 4581 else 4582 return nullptr; 4583 } else { 4584 return nullptr; 4585 } 4586 } while (!Worklist.empty()); 4587 4588 return Result; 4589 } 4590 4591 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4592 const Value *V, bool AllowLifetime, bool AllowDroppable) { 4593 for (const User *U : V->users()) { 4594 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 4595 if (!II) 4596 return false; 4597 4598 if (AllowLifetime && II->isLifetimeStartOrEnd()) 4599 continue; 4600 4601 if (AllowDroppable && II->isDroppable()) 4602 continue; 4603 4604 return false; 4605 } 4606 return true; 4607 } 4608 4609 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 4610 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4611 V, /* AllowLifetime */ true, /* AllowDroppable */ false); 4612 } 4613 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) { 4614 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4615 V, /* AllowLifetime */ true, /* AllowDroppable */ true); 4616 } 4617 4618 bool llvm::mustSuppressSpeculation(const LoadInst &LI) { 4619 if (!LI.isUnordered()) 4620 return true; 4621 const Function &F = *LI.getFunction(); 4622 // Speculative load may create a race that did not exist in the source. 4623 return F.hasFnAttribute(Attribute::SanitizeThread) || 4624 // Speculative load may load data from dirty regions. 
4625 F.hasFnAttribute(Attribute::SanitizeAddress) || 4626 F.hasFnAttribute(Attribute::SanitizeHWAddress); 4627 } 4628 4629 4630 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 4631 const Instruction *CtxI, 4632 const DominatorTree *DT, 4633 const TargetLibraryInfo *TLI) { 4634 const Operator *Inst = dyn_cast<Operator>(V); 4635 if (!Inst) 4636 return false; 4637 4638 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 4639 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 4640 if (C->canTrap()) 4641 return false; 4642 4643 switch (Inst->getOpcode()) { 4644 default: 4645 return true; 4646 case Instruction::UDiv: 4647 case Instruction::URem: { 4648 // x / y is undefined if y == 0. 4649 const APInt *V; 4650 if (match(Inst->getOperand(1), m_APInt(V))) 4651 return *V != 0; 4652 return false; 4653 } 4654 case Instruction::SDiv: 4655 case Instruction::SRem: { 4656 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1. 4657 const APInt *Numerator, *Denominator; 4658 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 4659 return false; 4660 // We cannot hoist this division if the denominator is 0. 4661 if (*Denominator == 0) 4662 return false; 4663 // It's safe to hoist if the denominator is neither 0 nor -1. 4664 if (!Denominator->isAllOnes()) 4665 return true; 4666 // At this point we know that the denominator is -1. It is safe to hoist as 4667 // long as we know that the numerator is not INT_MIN. 4668 if (match(Inst->getOperand(0), m_APInt(Numerator))) 4669 return !Numerator->isMinSignedValue(); 4670 // The numerator *might* be MinSignedValue. 4671 return false; 4672 } 4673 case Instruction::Load: { 4674 const LoadInst *LI = cast<LoadInst>(Inst); 4675 if (mustSuppressSpeculation(*LI)) 4676 return false; 4677 const DataLayout &DL = LI->getModule()->getDataLayout(); 4678 return isDereferenceableAndAlignedPointer( 4679 LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()), 4680 DL, CtxI, DT, TLI); 4681 } 4682 case Instruction::Call: { 4683 auto *CI = cast<const CallInst>(Inst); 4684 const Function *Callee = CI->getCalledFunction(); 4685 4686 // The called function could have undefined behavior or side-effects, even 4687 // if marked readnone nounwind. 4688 return Callee && Callee->isSpeculatable(); 4689 } 4690 case Instruction::VAArg: 4691 case Instruction::Alloca: 4692 case Instruction::Invoke: 4693 case Instruction::CallBr: 4694 case Instruction::PHI: 4695 case Instruction::Store: 4696 case Instruction::Ret: 4697 case Instruction::Br: 4698 case Instruction::IndirectBr: 4699 case Instruction::Switch: 4700 case Instruction::Unreachable: 4701 case Instruction::Fence: 4702 case Instruction::AtomicRMW: 4703 case Instruction::AtomicCmpXchg: 4704 case Instruction::LandingPad: 4705 case Instruction::Resume: 4706 case Instruction::CatchSwitch: 4707 case Instruction::CatchPad: 4708 case Instruction::CatchRet: 4709 case Instruction::CleanupPad: 4710 case Instruction::CleanupRet: 4711 return false; // Misc instructions which have effects 4712 } 4713 } 4714 4715 bool llvm::mayBeMemoryDependent(const Instruction &I) { 4716 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 4717 } 4718 4719 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4720 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) { 4721 switch (OR) { 4722 case ConstantRange::OverflowResult::MayOverflow: 4723 return OverflowResult::MayOverflow; 4724 case ConstantRange::OverflowResult::AlwaysOverflowsLow: 4725 return OverflowResult::AlwaysOverflowsLow; 4726 case ConstantRange::OverflowResult::AlwaysOverflowsHigh: 4727 return OverflowResult::AlwaysOverflowsHigh; 4728 case ConstantRange::OverflowResult::NeverOverflows: 4729 return OverflowResult::NeverOverflows; 4730 } 4731 llvm_unreachable("Unknown OverflowResult"); 4732 } 4733 4734 /// Combine constant ranges from computeConstantRange() and computeKnownBits(). 4735 static ConstantRange computeConstantRangeIncludingKnownBits( 4736 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth, 4737 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4738 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) { 4739 KnownBits Known = computeKnownBits( 4740 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo); 4741 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned); 4742 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo); 4743 ConstantRange::PreferredRangeType RangeType = 4744 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned; 4745 return CR1.intersectWith(CR2, RangeType); 4746 } 4747 4748 OverflowResult llvm::computeOverflowForUnsignedMul( 4749 const Value *LHS, const Value *RHS, const DataLayout &DL, 4750 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4751 bool UseInstrInfo) { 4752 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4753 nullptr, UseInstrInfo); 4754 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4755 nullptr, UseInstrInfo); 4756 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false); 4757 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false); 4758 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange)); 4759 } 4760 4761 OverflowResult 4762 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 4763 const DataLayout &DL, AssumptionCache *AC, 4764 const Instruction *CxtI, 4765 const DominatorTree *DT, bool UseInstrInfo) { 4766 // Multiplying n * m significant bits yields a result of n + m significant 4767 // bits. If the total number of significant bits does not exceed the 4768 // result bit width (minus 1), there is no overflow. 4769 // This means if we have enough leading sign bits in the operands 4770 // we can guarantee that the result does not overflow. 4771 // Ref: "Hacker's Delight" by Henry Warren 4772 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 4773 4774 // Note that underestimating the number of sign bits gives a more 4775 // conservative answer. 4776 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 4777 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 4778 4779 // First handle the easy case: if we have enough sign bits there's 4780 // definitely no overflow. 4781 if (SignBits > BitWidth + 1) 4782 return OverflowResult::NeverOverflows; 4783 4784 // There are two ambiguous cases where there can be no overflow: 4785 // SignBits == BitWidth + 1 and 4786 // SignBits == BitWidth 4787 // The second case is difficult to check, therefore we only handle the 4788 // first case. 4789 if (SignBits == BitWidth + 1) { 4790 // It overflows only when both arguments are negative and the true 4791 // product is exactly the minimum negative number. 4792 // E.g. 
mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 4793 // For simplicity we just check if at least one side is not negative. 4794 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4795 nullptr, UseInstrInfo); 4796 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4797 nullptr, UseInstrInfo); 4798 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) 4799 return OverflowResult::NeverOverflows; 4800 } 4801 return OverflowResult::MayOverflow; 4802 } 4803 4804 OverflowResult llvm::computeOverflowForUnsignedAdd( 4805 const Value *LHS, const Value *RHS, const DataLayout &DL, 4806 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4807 bool UseInstrInfo) { 4808 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 4809 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, 4810 nullptr, UseInstrInfo); 4811 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 4812 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, 4813 nullptr, UseInstrInfo); 4814 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange)); 4815 } 4816 4817 static OverflowResult computeOverflowForSignedAdd(const Value *LHS, 4818 const Value *RHS, 4819 const AddOperator *Add, 4820 const DataLayout &DL, 4821 AssumptionCache *AC, 4822 const Instruction *CxtI, 4823 const DominatorTree *DT) { 4824 if (Add && Add->hasNoSignedWrap()) { 4825 return OverflowResult::NeverOverflows; 4826 } 4827 4828 // If LHS and RHS each have at least two sign bits, the addition will look 4829 // like 4830 // 4831 // XX..... + 4832 // YY..... 4833 // 4834 // If the carry into the most significant position is 0, X and Y can't both 4835 // be 1 and therefore the carry out of the addition is also 0. 4836 // 4837 // If the carry into the most significant position is 1, X and Y can't both 4838 // be 0 and therefore the carry out of the addition is also 1. 4839 // 4840 // Since the carry into the most significant position is always equal to 4841 // the carry out of the addition, there is no signed overflow. 4842 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4843 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4844 return OverflowResult::NeverOverflows; 4845 4846 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 4847 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); 4848 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 4849 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); 4850 OverflowResult OR = 4851 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange)); 4852 if (OR != OverflowResult::MayOverflow) 4853 return OR; 4854 4855 // The remaining code needs Add to be available. Return early if it is not. 4856 if (!Add) 4857 return OverflowResult::MayOverflow; 4858 4859 // If the sign of Add is the same as at least one of the operands, this add 4860 // CANNOT overflow. If this can be determined from the known bits of the 4861 // operands the above signedAddMayOverflow() check will have already done so. 4862 // The only other way to improve on the known bits is from an assumption, so 4863 // call computeKnownBitsFromAssume() directly.
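  // A sketch of the situation this handles (hypothetical values):
  //   %add = add i32 %x, %y          ; %x known non-negative
  //   call void @llvm.assume(i1 %c)  ; %c pins down the sign of %add
  // If the assumption shows that %add has the same sign as the known-sign
  // operand, the addition cannot have wrapped across the sign boundary.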
4864 bool LHSOrRHSKnownNonNegative = 4865 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative()); 4866 bool LHSOrRHSKnownNegative = 4867 (LHSRange.isAllNegative() || RHSRange.isAllNegative()); 4868 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4869 KnownBits AddKnown(LHSRange.getBitWidth()); 4870 computeKnownBitsFromAssume( 4871 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true)); 4872 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4873 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) 4874 return OverflowResult::NeverOverflows; 4875 } 4876 4877 return OverflowResult::MayOverflow; 4878 } 4879 4880 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4881 const Value *RHS, 4882 const DataLayout &DL, 4883 AssumptionCache *AC, 4884 const Instruction *CxtI, 4885 const DominatorTree *DT) { 4886 // Checking for conditions implied by dominating conditions may be expensive. 4887 // Limit it to usub_with_overflow calls for now. 4888 if (match(CxtI, 4889 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value()))) 4890 if (auto C = 4891 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) { 4892 if (*C) 4893 return OverflowResult::NeverOverflows; 4894 return OverflowResult::AlwaysOverflowsLow; 4895 } 4896 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 4897 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 4898 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 4899 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 4900 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange)); 4901 } 4902 4903 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 4904 const Value *RHS, 4905 const DataLayout &DL, 4906 AssumptionCache *AC, 4907 const Instruction *CxtI, 4908 const DominatorTree *DT) { 4909 // If LHS and RHS each have at least two sign bits, the subtraction 4910 // cannot overflow. 4911 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4912 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4913 return OverflowResult::NeverOverflows; 4914 4915 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 4916 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); 4917 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 4918 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); 4919 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange)); 4920 } 4921 4922 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, 4923 const DominatorTree &DT) { 4924 SmallVector<const BranchInst *, 2> GuardingBranches; 4925 SmallVector<const ExtractValueInst *, 2> Results; 4926 4927 for (const User *U : WO->users()) { 4928 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { 4929 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); 4930 4931 if (EVI->getIndices()[0] == 0) 4932 Results.push_back(EVI); 4933 else { 4934 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); 4935 4936 for (const auto *U : EVI->users()) 4937 if (const auto *B = dyn_cast<BranchInst>(U)) { 4938 assert(B->isConditional() && "How else is it using an i1?"); 4939 GuardingBranches.push_back(B); 4940 } 4941 } 4942 } else { 4943 // We are using the aggregate directly in a way we don't want to analyze 4944 // here (storing it to a global, say). 
4945 return false; 4946 } 4947 } 4948 4949 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) { 4950 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1)); 4951 if (!NoWrapEdge.isSingleEdge()) 4952 return false; 4953 4954 // Check if all users of the add are provably no-wrap. 4955 for (const auto *Result : Results) { 4956 // If the extractvalue itself is not executed on overflow, then we don't 4957 // need to check each use separately, since domination is transitive. 4958 if (DT.dominates(NoWrapEdge, Result->getParent())) 4959 continue; 4960 4961 for (auto &RU : Result->uses()) 4962 if (!DT.dominates(NoWrapEdge, RU)) 4963 return false; 4964 } 4965 4966 return true; 4967 }; 4968 4969 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch); 4970 } 4971 4972 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly, 4973 bool ConsiderFlags) { 4974 4975 if (ConsiderFlags && Op->hasPoisonGeneratingFlags()) 4976 return true; 4977 4978 // TODO: this should really be under the ConsiderFlags block, but currently 4979 // these are not dropped by dropPoisonGeneratingFlags 4980 if (const auto *FP = dyn_cast<FPMathOperator>(Op)) { 4981 auto FMF = FP->getFastMathFlags(); 4982 if (FMF.noNaNs() || FMF.noInfs()) 4983 return true; 4984 } 4985 4986 unsigned Opcode = Op->getOpcode(); 4987 4988 // Check whether opcode is a poison/undef-generating operation 4989 switch (Opcode) { 4990 case Instruction::Shl: 4991 case Instruction::AShr: 4992 case Instruction::LShr: { 4993 // Shifts return poison if the shift amount is equal to or larger than the bitwidth. 4994 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) { 4995 SmallVector<Constant *, 4> ShiftAmounts; 4996 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) { 4997 unsigned NumElts = FVTy->getNumElements(); 4998 for (unsigned i = 0; i < NumElts; ++i) 4999 ShiftAmounts.push_back(C->getAggregateElement(i)); 5000 } else if (isa<ScalableVectorType>(C->getType())) 5001 return true; // Can't tell, just return true to be safe 5002 else 5003 ShiftAmounts.push_back(C); 5004 5005 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) { 5006 auto *CI = dyn_cast_or_null<ConstantInt>(C); 5007 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth()); 5008 }); 5009 return !Safe; 5010 } 5011 return true; 5012 } 5013 case Instruction::FPToSI: 5014 case Instruction::FPToUI: 5015 // fptosi/ui yields poison if the resulting value does not fit in the 5016 // destination type. 5017 return true; 5018 case Instruction::Call: 5019 if (auto *II = dyn_cast<IntrinsicInst>(Op)) { 5020 switch (II->getIntrinsicID()) { 5021 // TODO: Add more intrinsics. 5022 case Intrinsic::ctpop: 5023 case Intrinsic::sadd_with_overflow: 5024 case Intrinsic::ssub_with_overflow: 5025 case Intrinsic::smul_with_overflow: 5026 case Intrinsic::uadd_with_overflow: 5027 case Intrinsic::usub_with_overflow: 5028 case Intrinsic::umul_with_overflow: 5029 return false; 5030 } 5031 } 5032 LLVM_FALLTHROUGH; 5033 case Instruction::CallBr: 5034 case Instruction::Invoke: { 5035 const auto *CB = cast<CallBase>(Op); 5036 return !CB->hasRetAttr(Attribute::NoUndef); 5037 } 5038 case Instruction::InsertElement: 5039 case Instruction::ExtractElement: { 5040 // If the index exceeds the length of the vector, it returns poison 5041 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType()); 5042 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ?
2 : 1; 5043 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp)); 5044 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue())) 5045 return true; 5046 return false; 5047 } 5048 case Instruction::ShuffleVector: { 5049 // shufflevector may return undef. 5050 if (PoisonOnly) 5051 return false; 5052 ArrayRef<int> Mask = isa<ConstantExpr>(Op) 5053 ? cast<ConstantExpr>(Op)->getShuffleMask() 5054 : cast<ShuffleVectorInst>(Op)->getShuffleMask(); 5055 return is_contained(Mask, UndefMaskElem); 5056 } 5057 case Instruction::FNeg: 5058 case Instruction::PHI: 5059 case Instruction::Select: 5060 case Instruction::URem: 5061 case Instruction::SRem: 5062 case Instruction::ExtractValue: 5063 case Instruction::InsertValue: 5064 case Instruction::Freeze: 5065 case Instruction::ICmp: 5066 case Instruction::FCmp: 5067 return false; 5068 case Instruction::GetElementPtr: 5069 // inbounds is handled above 5070 // TODO: what about inrange on constexpr? 5071 return false; 5072 default: { 5073 const auto *CE = dyn_cast<ConstantExpr>(Op); 5074 if (isa<CastInst>(Op) || (CE && CE->isCast())) 5075 return false; 5076 else if (Instruction::isBinaryOp(Opcode)) 5077 return false; 5078 // Be conservative and return true. 5079 return true; 5080 } 5081 } 5082 } 5083 5084 bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) { 5085 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags); 5086 } 5087 5088 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) { 5089 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags); 5090 } 5091 5092 static bool directlyImpliesPoison(const Value *ValAssumedPoison, 5093 const Value *V, unsigned Depth) { 5094 if (ValAssumedPoison == V) 5095 return true; 5096 5097 const unsigned MaxDepth = 2; 5098 if (Depth >= MaxDepth) 5099 return false; 5100 5101 if (const auto *I = dyn_cast<Instruction>(V)) { 5102 if (propagatesPoison(cast<Operator>(I))) 5103 return any_of(I->operands(), [=](const Value *Op) { 5104 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); 5105 }); 5106 5107 // 'select ValAssumedPoison, _, _' is poison. 5108 if (const auto *SI = dyn_cast<SelectInst>(I)) 5109 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(), 5110 Depth + 1); 5111 // V = extractvalue V0, idx 5112 // V2 = extractvalue V0, idx2 5113 // V0's elements are all poison or not. 
(e.g., add_with_overflow) 5114 const WithOverflowInst *II; 5115 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) && 5116 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) || 5117 llvm::is_contained(II->args(), ValAssumedPoison))) 5118 return true; 5119 } 5120 return false; 5121 } 5122 5123 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V, 5124 unsigned Depth) { 5125 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison)) 5126 return true; 5127 5128 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) 5129 return true; 5130 5131 const unsigned MaxDepth = 2; 5132 if (Depth >= MaxDepth) 5133 return false; 5134 5135 const auto *I = dyn_cast<Instruction>(ValAssumedPoison); 5136 if (I && !canCreatePoison(cast<Operator>(I))) { 5137 return all_of(I->operands(), [=](const Value *Op) { 5138 return impliesPoison(Op, V, Depth + 1); 5139 }); 5140 } 5141 return false; 5142 } 5143 5144 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) { 5145 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); 5146 } 5147 5148 static bool programUndefinedIfUndefOrPoison(const Value *V, 5149 bool PoisonOnly); 5150 5151 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V, 5152 AssumptionCache *AC, 5153 const Instruction *CtxI, 5154 const DominatorTree *DT, 5155 unsigned Depth, bool PoisonOnly) { 5156 if (Depth >= MaxAnalysisRecursionDepth) 5157 return false; 5158 5159 if (isa<MetadataAsValue>(V)) 5160 return false; 5161 5162 if (const auto *A = dyn_cast<Argument>(V)) { 5163 if (A->hasAttribute(Attribute::NoUndef)) 5164 return true; 5165 } 5166 5167 if (auto *C = dyn_cast<Constant>(V)) { 5168 if (isa<UndefValue>(C)) 5169 return PoisonOnly && !isa<PoisonValue>(C); 5170 5171 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) || 5172 isa<ConstantPointerNull>(C) || isa<Function>(C)) 5173 return true; 5174 5175 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) 5176 return (PoisonOnly ? !C->containsPoisonElement() 5177 : !C->containsUndefOrPoisonElement()) && 5178 !C->containsConstantExpression(); 5179 } 5180 5181 // Strip cast operations from a pointer value. 5182 // Note that stripPointerCastsSameRepresentation can strip off getelementptr 5183 // inbounds with zero offset. To guarantee that the result isn't poison, the 5184 // stripped pointer is checked: it has to point into an allocated object or 5185 // be null, which ensures that `inbounds` getelementptrs with a 5186 // zero offset cannot produce poison. 5187 // It can also strip off addrspacecasts that do not change the bit 5188 // representation; we consider such an addrspacecast equivalent to a no-op. 5189 auto *StrippedV = V->stripPointerCastsSameRepresentation(); 5190 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) || 5191 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV)) 5192 return true; 5193 5194 auto OpCheck = [&](const Value *V) { 5195 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, 5196 PoisonOnly); 5197 }; 5198 5199 if (auto *Opr = dyn_cast<Operator>(V)) { 5200 // If the value is a freeze instruction, then it can never 5201 // be undef or poison.
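    // E.g. (illustrative): in
    //   %y = freeze i32 %x
    // %y is well defined even when %x is undef or poison.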
5202 if (isa<FreezeInst>(V)) 5203 return true; 5204 5205 if (const auto *CB = dyn_cast<CallBase>(V)) { 5206 if (CB->hasRetAttr(Attribute::NoUndef)) 5207 return true; 5208 } 5209 5210 if (const auto *PN = dyn_cast<PHINode>(V)) { 5211 unsigned Num = PN->getNumIncomingValues(); 5212 bool IsWellDefined = true; 5213 for (unsigned i = 0; i < Num; ++i) { 5214 auto *TI = PN->getIncomingBlock(i)->getTerminator(); 5215 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, 5216 DT, Depth + 1, PoisonOnly)) { 5217 IsWellDefined = false; 5218 break; 5219 } 5220 } 5221 if (IsWellDefined) 5222 return true; 5223 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck)) 5224 return true; 5225 } 5226 5227 if (auto *I = dyn_cast<LoadInst>(V)) 5228 if (I->getMetadata(LLVMContext::MD_noundef)) 5229 return true; 5230 5231 if (programUndefinedIfUndefOrPoison(V, PoisonOnly)) 5232 return true; 5233 5234 // CxtI may be null or a cloned instruction. 5235 if (!CtxI || !CtxI->getParent() || !DT) 5236 return false; 5237 5238 auto *DNode = DT->getNode(CtxI->getParent()); 5239 if (!DNode) 5240 // Unreachable block 5241 return false; 5242 5243 // If V is used as a branch condition before reaching CtxI, V cannot be 5244 // undef or poison. 5245 // br V, BB1, BB2 5246 // BB1: 5247 // CtxI ; V cannot be undef or poison here 5248 auto *Dominator = DNode->getIDom(); 5249 while (Dominator) { 5250 auto *TI = Dominator->getBlock()->getTerminator(); 5251 5252 Value *Cond = nullptr; 5253 if (auto BI = dyn_cast<BranchInst>(TI)) { 5254 if (BI->isConditional()) 5255 Cond = BI->getCondition(); 5256 } else if (auto SI = dyn_cast<SwitchInst>(TI)) { 5257 Cond = SI->getCondition(); 5258 } 5259 5260 if (Cond) { 5261 if (Cond == V) 5262 return true; 5263 else if (PoisonOnly && isa<Operator>(Cond)) { 5264 // For poison, we can analyze further 5265 auto *Opr = cast<Operator>(Cond); 5266 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V)) 5267 return true; 5268 } 5269 } 5270 5271 Dominator = Dominator->getIDom(); 5272 } 5273 5274 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC)) 5275 return true; 5276 5277 return false; 5278 } 5279 5280 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC, 5281 const Instruction *CtxI, 5282 const DominatorTree *DT, 5283 unsigned Depth) { 5284 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false); 5285 } 5286 5287 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, 5288 const Instruction *CtxI, 5289 const DominatorTree *DT, unsigned Depth) { 5290 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true); 5291 } 5292 5293 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, 5294 const DataLayout &DL, 5295 AssumptionCache *AC, 5296 const Instruction *CxtI, 5297 const DominatorTree *DT) { 5298 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), 5299 Add, DL, AC, CxtI, DT); 5300 } 5301 5302 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, 5303 const Value *RHS, 5304 const DataLayout &DL, 5305 AssumptionCache *AC, 5306 const Instruction *CxtI, 5307 const DominatorTree *DT) { 5308 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); 5309 } 5310 5311 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { 5312 // Note: An atomic operation isn't guaranteed to return in a reasonable amount 5313 // of time because it's possible for another thread to interfere with 
it for an 5314 // arbitrary length of time, but programs aren't allowed to rely on that. 5315 5316 // If there is no successor, then execution can't transfer to it. 5317 if (isa<ReturnInst>(I)) 5318 return false; 5319 if (isa<UnreachableInst>(I)) 5320 return false; 5321 5322 // Note: Do not add new checks here; instead, change Instruction::mayThrow or 5323 // Instruction::willReturn. 5324 // 5325 // FIXME: Move this check into Instruction::willReturn. 5326 if (isa<CatchPadInst>(I)) { 5327 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) { 5328 default: 5329 // A catchpad may invoke exception object constructors and such, which 5330 // in some languages can be arbitrary code, so be conservative by default. 5331 return false; 5332 case EHPersonality::CoreCLR: 5333 // For CoreCLR, it just involves a type test. 5334 return true; 5335 } 5336 } 5337 5338 // An instruction that returns without throwing must transfer control flow 5339 // to a successor. 5340 return !I->mayThrow() && I->willReturn(); 5341 } 5342 5343 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { 5344 // TODO: This is slightly conservative for invoke instructions, since exiting 5345 // via an exception *is* normal control flow for them. 5346 for (const Instruction &I : *BB) 5347 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5348 return false; 5349 return true; 5350 } 5351 5352 bool llvm::isGuaranteedToTransferExecutionToSuccessor( 5353 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End, 5354 unsigned ScanLimit) { 5355 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End), 5356 ScanLimit); 5357 } 5358 5359 bool llvm::isGuaranteedToTransferExecutionToSuccessor( 5360 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) { 5361 assert(ScanLimit && "scan limit must be non-zero"); 5362 for (const Instruction &I : Range) { 5363 if (isa<DbgInfoIntrinsic>(I)) 5364 continue; 5365 if (--ScanLimit == 0) 5366 return false; 5367 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5368 return false; 5369 } 5370 return true; 5371 } 5372 5373 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 5374 const Loop *L) { 5375 // The loop header is guaranteed to be executed for every iteration. 5376 // 5377 // FIXME: Relax this constraint to cover all basic blocks that are 5378 // guaranteed to be executed at every iteration. 5379 if (I->getParent() != L->getHeader()) return false; 5380 5381 for (const Instruction &LI : *L->getHeader()) { 5382 if (&LI == I) return true; 5383 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 5384 } 5385 llvm_unreachable("Instruction not contained in its own parent basic block."); 5386 } 5387 5388 bool llvm::propagatesPoison(const Operator *I) { 5389 switch (I->getOpcode()) { 5390 case Instruction::Freeze: 5391 case Instruction::Select: 5392 case Instruction::PHI: 5393 case Instruction::Invoke: 5394 return false; 5395 case Instruction::Call: 5396 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 5397 switch (II->getIntrinsicID()) { 5398 // TODO: Add more intrinsics. 5399 case Intrinsic::sadd_with_overflow: 5400 case Intrinsic::ssub_with_overflow: 5401 case Intrinsic::smul_with_overflow: 5402 case Intrinsic::uadd_with_overflow: 5403 case Intrinsic::usub_with_overflow: 5404 case Intrinsic::umul_with_overflow: 5405 // If an input is a vector containing a poison element, the 5406 // corresponding lanes of both output vectors (calculated results 5407 // and overflow bits) are poison.
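        // E.g. (illustrative):
        //   %r = call {<2 x i32>, <2 x i1>}
        //        @llvm.sadd.with.overflow.v2i32(<2 x i32> <i32 poison, i32 1>,
        //                                       <2 x i32> %y)
        // Lane 0 of both the result and the overflow vector is poison while
        // lane 1 is computed normally, so poison propagates through the call.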
5408 return true; 5409 case Intrinsic::ctpop: 5410 return true; 5411 } 5412 } 5413 return false; 5414 case Instruction::ICmp: 5415 case Instruction::FCmp: 5416 case Instruction::GetElementPtr: 5417 return true; 5418 default: 5419 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I)) 5420 return true; 5421 5422 // Be conservative and return false. 5423 return false; 5424 } 5425 } 5426 5427 void llvm::getGuaranteedWellDefinedOps( 5428 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) { 5429 switch (I->getOpcode()) { 5430 case Instruction::Store: 5431 Operands.insert(cast<StoreInst>(I)->getPointerOperand()); 5432 break; 5433 5434 case Instruction::Load: 5435 Operands.insert(cast<LoadInst>(I)->getPointerOperand()); 5436 break; 5437 5438 // Since the dereferenceable attribute implies noundef, atomic operations 5439 // also implicitly have noundef pointers. 5440 case Instruction::AtomicCmpXchg: 5441 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand()); 5442 break; 5443 5444 case Instruction::AtomicRMW: 5445 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand()); 5446 break; 5447 5448 case Instruction::Call: 5449 case Instruction::Invoke: { 5450 const CallBase *CB = cast<CallBase>(I); 5451 if (CB->isIndirectCall()) 5452 Operands.insert(CB->getCalledOperand()); 5453 for (unsigned i = 0; i < CB->arg_size(); ++i) { 5454 if (CB->paramHasAttr(i, Attribute::NoUndef) || 5455 CB->paramHasAttr(i, Attribute::Dereferenceable)) 5456 Operands.insert(CB->getArgOperand(i)); 5457 } 5458 break; 5459 } 5460 case Instruction::Ret: 5461 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef)) 5462 Operands.insert(I->getOperand(0)); 5463 break; 5464 default: 5465 break; 5466 } 5467 } 5468 5469 void llvm::getGuaranteedNonPoisonOps(const Instruction *I, 5470 SmallPtrSetImpl<const Value *> &Operands) { 5471 getGuaranteedWellDefinedOps(I, Operands); 5472 switch (I->getOpcode()) { 5473 // Divisors of these operations are allowed to be partially undef. 5474 case Instruction::UDiv: 5475 case Instruction::SDiv: 5476 case Instruction::URem: 5477 case Instruction::SRem: 5478 Operands.insert(I->getOperand(1)); 5479 break; 5480 case Instruction::Switch: 5481 if (BranchOnPoisonAsUB) 5482 Operands.insert(cast<SwitchInst>(I)->getCondition()); 5483 break; 5484 case Instruction::Br: { 5485 auto *BR = cast<BranchInst>(I); 5486 if (BranchOnPoisonAsUB && BR->isConditional()) 5487 Operands.insert(BR->getCondition()); 5488 break; 5489 } 5490 default: 5491 break; 5492 } 5493 } 5494 5495 bool llvm::mustTriggerUB(const Instruction *I, 5496 const SmallSet<const Value *, 16>& KnownPoison) { 5497 SmallPtrSet<const Value *, 4> NonPoisonOps; 5498 getGuaranteedNonPoisonOps(I, NonPoisonOps); 5499 5500 for (const auto *V : NonPoisonOps) 5501 if (KnownPoison.count(V)) 5502 return true; 5503 5504 return false; 5505 } 5506 5507 static bool programUndefinedIfUndefOrPoison(const Value *V, 5508 bool PoisonOnly) { 5509 // We currently only look for uses of values within the same basic 5510 // block, as that makes it easier to guarantee that the uses will be 5511 // executed given that Inst is executed. 5512 // 5513 // FIXME: Expand this to consider uses beyond the same basic block. To do 5514 // this, look out for the distinction between post-dominance and strong 5515 // post-dominance.
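  // E.g. (illustrative): if V may be poison and, later in the same block,
  //   store i32 0, i32* %V
  // uses V as the store's pointer operand, then executing the block is UB
  // whenever V is poison, since a store's pointer must be well defined.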
5516 const BasicBlock *BB = nullptr; 5517 BasicBlock::const_iterator Begin; 5518 if (const auto *Inst = dyn_cast<Instruction>(V)) { 5519 BB = Inst->getParent(); 5520 Begin = Inst->getIterator(); 5521 Begin++; 5522 } else if (const auto *Arg = dyn_cast<Argument>(V)) { 5523 BB = &Arg->getParent()->getEntryBlock(); 5524 Begin = BB->begin(); 5525 } else { 5526 return false; 5527 } 5528 5529 // Limit number of instructions we look at, to avoid scanning through large 5530 // blocks. The current limit is chosen arbitrarily. 5531 unsigned ScanLimit = 32; 5532 BasicBlock::const_iterator End = BB->end(); 5533 5534 if (!PoisonOnly) { 5535 // Since undef does not propagate eagerly, be conservative & just check 5536 // whether a value is directly passed to an instruction that must take 5537 // well-defined operands. 5538 5539 for (auto &I : make_range(Begin, End)) { 5540 if (isa<DbgInfoIntrinsic>(I)) 5541 continue; 5542 if (--ScanLimit == 0) 5543 break; 5544 5545 SmallPtrSet<const Value *, 4> WellDefinedOps; 5546 getGuaranteedWellDefinedOps(&I, WellDefinedOps); 5547 if (WellDefinedOps.contains(V)) 5548 return true; 5549 5550 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5551 break; 5552 } 5553 return false; 5554 } 5555 5556 // Set of instructions that we have proved will yield poison if Inst 5557 // does. 5558 SmallSet<const Value *, 16> YieldsPoison; 5559 SmallSet<const BasicBlock *, 4> Visited; 5560 5561 YieldsPoison.insert(V); 5562 auto Propagate = [&](const User *User) { 5563 if (propagatesPoison(cast<Operator>(User))) 5564 YieldsPoison.insert(User); 5565 }; 5566 for_each(V->users(), Propagate); 5567 Visited.insert(BB); 5568 5569 while (true) { 5570 for (auto &I : make_range(Begin, End)) { 5571 if (isa<DbgInfoIntrinsic>(I)) 5572 continue; 5573 if (--ScanLimit == 0) 5574 return false; 5575 if (mustTriggerUB(&I, YieldsPoison)) 5576 return true; 5577 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5578 return false; 5579 5580 // Mark poison that propagates from I through uses of I. 
5581 if (YieldsPoison.count(&I)) 5582 for_each(I.users(), Propagate); 5583 } 5584 5585 BB = BB->getSingleSuccessor(); 5586 if (!BB || !Visited.insert(BB).second) 5587 break; 5588 5589 Begin = BB->getFirstNonPHI()->getIterator(); 5590 End = BB->end(); 5591 } 5592 return false; 5593 } 5594 5595 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) { 5596 return ::programUndefinedIfUndefOrPoison(Inst, false); 5597 } 5598 5599 bool llvm::programUndefinedIfPoison(const Instruction *Inst) { 5600 return ::programUndefinedIfUndefOrPoison(Inst, true); 5601 } 5602 5603 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) { 5604 if (FMF.noNaNs()) 5605 return true; 5606 5607 if (auto *C = dyn_cast<ConstantFP>(V)) 5608 return !C->isNaN(); 5609 5610 if (auto *C = dyn_cast<ConstantDataVector>(V)) { 5611 if (!C->getElementType()->isFloatingPointTy()) 5612 return false; 5613 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { 5614 if (C->getElementAsAPFloat(I).isNaN()) 5615 return false; 5616 } 5617 return true; 5618 } 5619 5620 if (isa<ConstantAggregateZero>(V)) 5621 return true; 5622 5623 return false; 5624 } 5625 5626 static bool isKnownNonZero(const Value *V) { 5627 if (auto *C = dyn_cast<ConstantFP>(V)) 5628 return !C->isZero(); 5629 5630 if (auto *C = dyn_cast<ConstantDataVector>(V)) { 5631 if (!C->getElementType()->isFloatingPointTy()) 5632 return false; 5633 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { 5634 if (C->getElementAsAPFloat(I).isZero()) 5635 return false; 5636 } 5637 return true; 5638 } 5639 5640 return false; 5641 } 5642 5643 /// Match a clamp pattern for float types without regard for NaNs or signed zeros. 5644 /// Given a non-min/max outer cmp/select from the clamp pattern, this 5645 /// function recognizes whether it can be substituted with a "canonical" min/max 5646 /// pattern. 5647 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, 5648 Value *CmpLHS, Value *CmpRHS, 5649 Value *TrueVal, Value *FalseVal, 5650 Value *&LHS, Value *&RHS) { 5651 // Try to match 5652 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2)) 5653 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2)) 5654 // and return a description of the outer Max/Min. 5655 5656 // First, check if the select has inverse order: 5657 if (CmpRHS == FalseVal) { 5658 std::swap(TrueVal, FalseVal); 5659 Pred = CmpInst::getInversePredicate(Pred); 5660 } 5661 5662 // Assume success now. If there's no match, callers should not use these anyway.
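  // A concrete instance of the first pattern above (illustrative):
  //   %cmp = fcmp olt float %x, 1.0
  //   %c2  = fcmp olt float %x, 10.0
  //   %min = select i1 %c2, float %x, float 10.0
  //   %sel = select i1 %cmp, float 1.0, float %min
  // is recognized as fmax(1.0, fmin(%x, 10.0)), i.e. a clamp to [1.0, 10.0].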
5663 LHS = TrueVal; 5664 RHS = FalseVal; 5665 5666 const APFloat *FC1; 5667 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) 5668 return {SPF_UNKNOWN, SPNB_NA, false}; 5669 5670 const APFloat *FC2; 5671 switch (Pred) { 5672 case CmpInst::FCMP_OLT: 5673 case CmpInst::FCMP_OLE: 5674 case CmpInst::FCMP_ULT: 5675 case CmpInst::FCMP_ULE: 5676 if (match(FalseVal, 5677 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), 5678 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && 5679 *FC1 < *FC2) 5680 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; 5681 break; 5682 case CmpInst::FCMP_OGT: 5683 case CmpInst::FCMP_OGE: 5684 case CmpInst::FCMP_UGT: 5685 case CmpInst::FCMP_UGE: 5686 if (match(FalseVal, 5687 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), 5688 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && 5689 *FC1 > *FC2) 5690 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; 5691 break; 5692 default: 5693 break; 5694 } 5695 5696 return {SPF_UNKNOWN, SPNB_NA, false}; 5697 } 5698 5699 /// Recognize variations of: 5700 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 5701 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 5702 Value *CmpLHS, Value *CmpRHS, 5703 Value *TrueVal, Value *FalseVal) { 5704 // Swap the select operands and predicate to match the patterns below. 5705 if (CmpRHS != TrueVal) { 5706 Pred = ICmpInst::getSwappedPredicate(Pred); 5707 std::swap(TrueVal, FalseVal); 5708 } 5709 const APInt *C1; 5710 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 5711 const APInt *C2; 5712 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 5713 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 5714 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 5715 return {SPF_SMAX, SPNB_NA, false}; 5716 5717 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 5718 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 5719 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 5720 return {SPF_SMIN, SPNB_NA, false}; 5721 5722 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 5723 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 5724 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 5725 return {SPF_UMAX, SPNB_NA, false}; 5726 5727 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 5728 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 5729 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 5730 return {SPF_UMIN, SPNB_NA, false}; 5731 } 5732 return {SPF_UNKNOWN, SPNB_NA, false}; 5733 } 5734 5735 /// Recognize variations of: 5736 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 5737 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 5738 Value *CmpLHS, Value *CmpRHS, 5739 Value *TVal, Value *FVal, 5740 unsigned Depth) { 5741 // TODO: Allow FP min/max with nnan/nsz. 5742 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 5743 5744 Value *A = nullptr, *B = nullptr; 5745 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 5746 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 5747 return {SPF_UNKNOWN, SPNB_NA, false}; 5748 5749 Value *C = nullptr, *D = nullptr; 5750 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 5751 if (L.Flavor != R.Flavor) 5752 return {SPF_UNKNOWN, SPNB_NA, false}; 5753 5754 // We have something like: x Pred y ? min(a, b) : min(c, d). 5755 // Try to match the compare to the min/max operations of the select operands. 
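  // For instance (illustrative):
  //   %t = smin(%a, %b)
  //   %f = smin(%b, %c)
  //   %r = select (icmp slt %a, %c), %t, %f
  // is smin(smin(%a, %b), smin(%b, %c)), the smin of all three values.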
5756 // First, make sure we have the right compare predicate. 5757 switch (L.Flavor) { 5758 case SPF_SMIN: 5759 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 5760 Pred = ICmpInst::getSwappedPredicate(Pred); 5761 std::swap(CmpLHS, CmpRHS); 5762 } 5763 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 5764 break; 5765 return {SPF_UNKNOWN, SPNB_NA, false}; 5766 case SPF_SMAX: 5767 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 5768 Pred = ICmpInst::getSwappedPredicate(Pred); 5769 std::swap(CmpLHS, CmpRHS); 5770 } 5771 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 5772 break; 5773 return {SPF_UNKNOWN, SPNB_NA, false}; 5774 case SPF_UMIN: 5775 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 5776 Pred = ICmpInst::getSwappedPredicate(Pred); 5777 std::swap(CmpLHS, CmpRHS); 5778 } 5779 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 5780 break; 5781 return {SPF_UNKNOWN, SPNB_NA, false}; 5782 case SPF_UMAX: 5783 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 5784 Pred = ICmpInst::getSwappedPredicate(Pred); 5785 std::swap(CmpLHS, CmpRHS); 5786 } 5787 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 5788 break; 5789 return {SPF_UNKNOWN, SPNB_NA, false}; 5790 default: 5791 return {SPF_UNKNOWN, SPNB_NA, false}; 5792 } 5793 5794 // If there is a common operand in the already matched min/max and the other 5795 // min/max operands match the compare operands (either directly or inverted), 5796 // then this is min/max of the same flavor. 5797 5798 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5799 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5800 if (D == B) { 5801 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5802 match(A, m_Not(m_Specific(CmpRHS))))) 5803 return {L.Flavor, SPNB_NA, false}; 5804 } 5805 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5806 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5807 if (C == B) { 5808 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5809 match(A, m_Not(m_Specific(CmpRHS))))) 5810 return {L.Flavor, SPNB_NA, false}; 5811 } 5812 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5813 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5814 if (D == A) { 5815 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5816 match(B, m_Not(m_Specific(CmpRHS))))) 5817 return {L.Flavor, SPNB_NA, false}; 5818 } 5819 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5820 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5821 if (C == A) { 5822 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5823 match(B, m_Not(m_Specific(CmpRHS))))) 5824 return {L.Flavor, SPNB_NA, false}; 5825 } 5826 5827 return {SPF_UNKNOWN, SPNB_NA, false}; 5828 } 5829 5830 /// If the input value is the result of a 'not' op, constant integer, or vector 5831 /// splat of a constant integer, return the bitwise-not source value. 5832 /// TODO: This could be extended to handle non-splat vector integer constants. 5833 static Value *getNotValue(Value *V) { 5834 Value *NotV; 5835 if (match(V, m_Not(m_Value(NotV)))) 5836 return NotV; 5837 5838 const APInt *C; 5839 if (match(V, m_APInt(C))) 5840 return ConstantInt::get(V->getType(), ~(*C)); 5841 5842 return nullptr; 5843 } 5844 5845 /// Match non-obvious integer minimum and maximum sequences. 
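/// For example (illustrative), a max disguised behind 'not' operations:
///   %nx  = xor i32 %x, -1
///   %ny  = xor i32 %y, -1
///   %cmp = icmp sgt i32 %x, %y
///   %sel = select i1 %cmp, i32 %ny, i32 %nx
/// is recognized as smax(%ny, %nx).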
5846 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 5847 Value *CmpLHS, Value *CmpRHS, 5848 Value *TrueVal, Value *FalseVal, 5849 Value *&LHS, Value *&RHS, 5850 unsigned Depth) { 5851 // Assume success. If there's no match, callers should not use these anyway. 5852 LHS = TrueVal; 5853 RHS = FalseVal; 5854 5855 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 5856 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5857 return SPR; 5858 5859 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 5860 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5861 return SPR; 5862 5863 // Look through 'not' ops to find disguised min/max. 5864 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y) 5865 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y) 5866 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) { 5867 switch (Pred) { 5868 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false}; 5869 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false}; 5870 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false}; 5871 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false}; 5872 default: break; 5873 } 5874 } 5875 5876 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X) 5877 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X) 5878 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) { 5879 switch (Pred) { 5880 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false}; 5881 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false}; 5882 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false}; 5883 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false}; 5884 default: break; 5885 } 5886 } 5887 5888 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 5889 return {SPF_UNKNOWN, SPNB_NA, false}; 5890 5891 // Z = X -nsw Y 5892 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 5893 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 5894 if (match(TrueVal, m_Zero()) && 5895 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 5896 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 5897 5898 // Z = X -nsw Y 5899 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 5900 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 5901 if (match(FalseVal, m_Zero()) && 5902 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 5903 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 5904 5905 const APInt *C1; 5906 if (!match(CmpRHS, m_APInt(C1))) 5907 return {SPF_UNKNOWN, SPNB_NA, false}; 5908 5909 // An unsigned min/max can be written with a signed compare. 5910 const APInt *C2; 5911 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 5912 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 5913 // Is the sign bit set? 5914 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 5915 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 5916 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue()) 5917 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 5918 5919 // Is the sign bit clear? 5920 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 5921 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 5922 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue()) 5923 return {CmpLHS == FalseVal ? 
SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 5924 } 5925 5926 return {SPF_UNKNOWN, SPNB_NA, false}; 5927 } 5928 5929 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) { 5930 assert(X && Y && "Invalid operand"); 5931 5932 // X = sub (0, Y) || X = sub nsw (0, Y) 5933 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) || 5934 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y))))) 5935 return true; 5936 5937 // Y = sub (0, X) || Y = sub nsw (0, X) 5938 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) || 5939 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X))))) 5940 return true; 5941 5942 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A) 5943 Value *A, *B; 5944 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) && 5945 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) || 5946 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) && 5947 match(Y, m_NSWSub(m_Specific(B), m_Specific(A))))); 5948 } 5949 5950 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 5951 FastMathFlags FMF, 5952 Value *CmpLHS, Value *CmpRHS, 5953 Value *TrueVal, Value *FalseVal, 5954 Value *&LHS, Value *&RHS, 5955 unsigned Depth) { 5956 if (CmpInst::isFPPredicate(Pred)) { 5957 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one 5958 // 0.0 operand, set the compare's 0.0 operands to that same value for the 5959 // purpose of identifying min/max. Disregard vector constants with undefined 5960 // elements because those can not be back-propagated for analysis. 5961 Value *OutputZeroVal = nullptr; 5962 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) && 5963 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement()) 5964 OutputZeroVal = TrueVal; 5965 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) && 5966 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement()) 5967 OutputZeroVal = FalseVal; 5968 5969 if (OutputZeroVal) { 5970 if (match(CmpLHS, m_AnyZeroFP())) 5971 CmpLHS = OutputZeroVal; 5972 if (match(CmpRHS, m_AnyZeroFP())) 5973 CmpRHS = OutputZeroVal; 5974 } 5975 } 5976 5977 LHS = CmpLHS; 5978 RHS = CmpRHS; 5979 5980 // Signed zero may return inconsistent results between implementations. 5981 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 5982 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 5983 // Therefore, we behave conservatively and only proceed if at least one of the 5984 // operands is known to not be zero or if we don't care about signed zero. 5985 switch (Pred) { 5986 default: break; 5987 // FIXME: Include OGT/OLT/UGT/ULT. 5988 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 5989 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 5990 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 5991 !isKnownNonZero(CmpRHS)) 5992 return {SPF_UNKNOWN, SPNB_NA, false}; 5993 } 5994 5995 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 5996 bool Ordered = false; 5997 5998 // When given one NaN and one non-NaN input: 5999 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 6000 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 6001 // ordered comparison fails), which could be NaN or non-NaN. 6002 // so here we discover exactly what NaN behavior is required/accepted. 6003 if (CmpInst::isFPPredicate(Pred)) { 6004 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 6005 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 6006 6007 if (LHSSafe && RHSSafe) { 6008 // Both operands are known non-NaN. 
6009 NaNBehavior = SPNB_RETURNS_ANY; 6010 } else if (CmpInst::isOrdered(Pred)) { 6011 // An ordered comparison will return false when given a NaN, so it 6012 // returns the RHS. 6013 Ordered = true; 6014 if (LHSSafe) 6015 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 6016 NaNBehavior = SPNB_RETURNS_NAN; 6017 else if (RHSSafe) 6018 NaNBehavior = SPNB_RETURNS_OTHER; 6019 else 6020 // Completely unsafe. 6021 return {SPF_UNKNOWN, SPNB_NA, false}; 6022 } else { 6023 Ordered = false; 6024 // An unordered comparison will return true when given a NaN, so it 6025 // returns the LHS. 6026 if (LHSSafe) 6027 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 6028 NaNBehavior = SPNB_RETURNS_OTHER; 6029 else if (RHSSafe) 6030 NaNBehavior = SPNB_RETURNS_NAN; 6031 else 6032 // Completely unsafe. 6033 return {SPF_UNKNOWN, SPNB_NA, false}; 6034 } 6035 } 6036 6037 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 6038 std::swap(CmpLHS, CmpRHS); 6039 Pred = CmpInst::getSwappedPredicate(Pred); 6040 if (NaNBehavior == SPNB_RETURNS_NAN) 6041 NaNBehavior = SPNB_RETURNS_OTHER; 6042 else if (NaNBehavior == SPNB_RETURNS_OTHER) 6043 NaNBehavior = SPNB_RETURNS_NAN; 6044 Ordered = !Ordered; 6045 } 6046 6047 // ([if]cmp X, Y) ? X : Y 6048 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 6049 switch (Pred) { 6050 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 6051 case ICmpInst::ICMP_UGT: 6052 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 6053 case ICmpInst::ICMP_SGT: 6054 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 6055 case ICmpInst::ICMP_ULT: 6056 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 6057 case ICmpInst::ICMP_SLT: 6058 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 6059 case FCmpInst::FCMP_UGT: 6060 case FCmpInst::FCMP_UGE: 6061 case FCmpInst::FCMP_OGT: 6062 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 6063 case FCmpInst::FCMP_ULT: 6064 case FCmpInst::FCMP_ULE: 6065 case FCmpInst::FCMP_OLT: 6066 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 6067 } 6068 } 6069 6070 if (isKnownNegation(TrueVal, FalseVal)) { 6071 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can 6072 // match against either LHS or sext(LHS). 6073 auto MaybeSExtCmpLHS = 6074 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); 6075 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); 6076 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); 6077 if (match(TrueVal, MaybeSExtCmpLHS)) { 6078 // Set the return values. If the compare uses the negated value (-X >s 0), 6079 // swap the return values because the negated value is always 'RHS'. 6080 LHS = TrueVal; 6081 RHS = FalseVal; 6082 if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) 6083 std::swap(LHS, RHS); 6084 6085 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) 6086 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) 6087 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) 6088 return {SPF_ABS, SPNB_NA, false}; 6089 6090 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) 6091 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) 6092 return {SPF_ABS, SPNB_NA, false}; 6093 6094 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) 6095 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) 6096 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) 6097 return {SPF_NABS, SPNB_NA, false}; 6098 } 6099 else if (match(FalseVal, MaybeSExtCmpLHS)) { 6100 // Set the return values. 
If the compare uses the negated value (-X >s 0), 6101 // swap the return values because the negated value is always 'RHS'. 6102 LHS = FalseVal; 6103 RHS = TrueVal; 6104 if (match(CmpLHS, m_Neg(m_Specific(TrueVal)))) 6105 std::swap(LHS, RHS); 6106 6107 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X) 6108 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X) 6109 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) 6110 return {SPF_NABS, SPNB_NA, false}; 6111 6112 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X) 6113 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X) 6114 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) 6115 return {SPF_ABS, SPNB_NA, false}; 6116 } 6117 } 6118 6119 if (CmpInst::isIntPredicate(Pred)) 6120 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth); 6121 6122 // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar 6123 // may return either -0.0 or 0.0, so the fcmp/select pair has stricter 6124 // semantics than minNum. Be conservative in such cases. 6125 if (NaNBehavior != SPNB_RETURNS_ANY || 6126 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 6127 !isKnownNonZero(CmpRHS))) 6128 return {SPF_UNKNOWN, SPNB_NA, false}; 6129 6130 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); 6131 } 6132 6133 /// Helps to match a select pattern in case of a type mismatch. 6134 /// 6135 /// The function handles the case where the types of the true and false values 6136 /// of a select instruction differ from the types of the cmp instruction's 6137 /// operands because of a cast instruction. The function checks if it is legal 6138 /// to move the cast operation after the "select". If so, it returns the new 6139 /// second value of the "select" (assuming the cast is moved): 6140 /// 1. As the operand of the cast instruction when both values of the "select" 6141 /// are the same cast instruction. 6142 /// 2. As the restored constant (by applying the reverse cast operation) when 6143 /// the first value of the "select" is a cast operation and the second 6144 /// value is a constant. 6145 /// NOTE: We return only the new second value because the first value could be 6146 /// accessed as the operand of the cast instruction. 6147 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 6148 Instruction::CastOps *CastOp) { 6149 auto *Cast1 = dyn_cast<CastInst>(V1); 6150 if (!Cast1) 6151 return nullptr; 6152 6153 *CastOp = Cast1->getOpcode(); 6154 Type *SrcTy = Cast1->getSrcTy(); 6155 if (auto *Cast2 = dyn_cast<CastInst>(V2)) { 6156 // If V1 and V2 are both the same cast from the same type, look through V1.
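    // E.g. (illustrative): if V1 = sext i8 %a to i32 and V2 = sext i8 %b to
    // i32, both casts can be moved below the select, so return %b (the new
    // second value) and let the caller rebuild the select on i8.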
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C can be extended in any way because we don't care about
      // the upper bits after truncation. It can't be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst; that is why we set the widened C to
      // CmpConst, and the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
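  // A worked example (values chosen for exposition, not from the source):
  // with an unsigned compare, *CastOp == ZExt, C = i32 300, and SrcTy = i8,
  // CastedTo becomes i8 44, and zext'ing 44 back to i32 yields 44 != 300, so
  // we return nullptr rather than pretend the constant survived narrowing.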
6232 Constant *CastedBack = 6233 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); 6234 if (CastedBack != C) 6235 return nullptr; 6236 6237 return CastedTo; 6238 } 6239 6240 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 6241 Instruction::CastOps *CastOp, 6242 unsigned Depth) { 6243 if (Depth >= MaxAnalysisRecursionDepth) 6244 return {SPF_UNKNOWN, SPNB_NA, false}; 6245 6246 SelectInst *SI = dyn_cast<SelectInst>(V); 6247 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 6248 6249 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 6250 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 6251 6252 Value *TrueVal = SI->getTrueValue(); 6253 Value *FalseVal = SI->getFalseValue(); 6254 6255 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS, 6256 CastOp, Depth); 6257 } 6258 6259 SelectPatternResult llvm::matchDecomposedSelectPattern( 6260 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, 6261 Instruction::CastOps *CastOp, unsigned Depth) { 6262 CmpInst::Predicate Pred = CmpI->getPredicate(); 6263 Value *CmpLHS = CmpI->getOperand(0); 6264 Value *CmpRHS = CmpI->getOperand(1); 6265 FastMathFlags FMF; 6266 if (isa<FPMathOperator>(CmpI)) 6267 FMF = CmpI->getFastMathFlags(); 6268 6269 // Bail out early. 6270 if (CmpI->isEquality()) 6271 return {SPF_UNKNOWN, SPNB_NA, false}; 6272 6273 // Deal with type mismatches. 6274 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 6275 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { 6276 // If this is a potential fmin/fmax with a cast to integer, then ignore 6277 // -0.0 because there is no corresponding integer value. 6278 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 6279 FMF.setNoSignedZeros(); 6280 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 6281 cast<CastInst>(TrueVal)->getOperand(0), C, 6282 LHS, RHS, Depth); 6283 } 6284 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { 6285 // If this is a potential fmin/fmax with a cast to integer, then ignore 6286 // -0.0 because there is no corresponding integer value. 6287 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 6288 FMF.setNoSignedZeros(); 6289 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 6290 C, cast<CastInst>(FalseVal)->getOperand(0), 6291 LHS, RHS, Depth); 6292 } 6293 } 6294 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 6295 LHS, RHS, Depth); 6296 } 6297 6298 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { 6299 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; 6300 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; 6301 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; 6302 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; 6303 if (SPF == SPF_FMINNUM) 6304 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; 6305 if (SPF == SPF_FMAXNUM) 6306 return Ordered ? 
FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; 6307 llvm_unreachable("unhandled!"); 6308 } 6309 6310 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { 6311 if (SPF == SPF_SMIN) return SPF_SMAX; 6312 if (SPF == SPF_UMIN) return SPF_UMAX; 6313 if (SPF == SPF_SMAX) return SPF_SMIN; 6314 if (SPF == SPF_UMAX) return SPF_UMIN; 6315 llvm_unreachable("unhandled!"); 6316 } 6317 6318 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) { 6319 switch (MinMaxID) { 6320 case Intrinsic::smax: return Intrinsic::smin; 6321 case Intrinsic::smin: return Intrinsic::smax; 6322 case Intrinsic::umax: return Intrinsic::umin; 6323 case Intrinsic::umin: return Intrinsic::umax; 6324 default: llvm_unreachable("Unexpected intrinsic"); 6325 } 6326 } 6327 6328 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) { 6329 return getMinMaxPred(getInverseMinMaxFlavor(SPF)); 6330 } 6331 6332 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) { 6333 switch (SPF) { 6334 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth); 6335 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth); 6336 case SPF_UMAX: return APInt::getMaxValue(BitWidth); 6337 case SPF_UMIN: return APInt::getMinValue(BitWidth); 6338 default: llvm_unreachable("Unexpected flavor"); 6339 } 6340 } 6341 6342 std::pair<Intrinsic::ID, bool> 6343 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) { 6344 // Check if VL contains select instructions that can be folded into a min/max 6345 // vector intrinsic and return the intrinsic if it is possible. 6346 // TODO: Support floating point min/max. 6347 bool AllCmpSingleUse = true; 6348 SelectPatternResult SelectPattern; 6349 SelectPattern.Flavor = SPF_UNKNOWN; 6350 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) { 6351 Value *LHS, *RHS; 6352 auto CurrentPattern = matchSelectPattern(I, LHS, RHS); 6353 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) || 6354 CurrentPattern.Flavor == SPF_FMINNUM || 6355 CurrentPattern.Flavor == SPF_FMAXNUM || 6356 !I->getType()->isIntOrIntVectorTy()) 6357 return false; 6358 if (SelectPattern.Flavor != SPF_UNKNOWN && 6359 SelectPattern.Flavor != CurrentPattern.Flavor) 6360 return false; 6361 SelectPattern = CurrentPattern; 6362 AllCmpSingleUse &= 6363 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value())); 6364 return true; 6365 })) { 6366 switch (SelectPattern.Flavor) { 6367 case SPF_SMIN: 6368 return {Intrinsic::smin, AllCmpSingleUse}; 6369 case SPF_UMIN: 6370 return {Intrinsic::umin, AllCmpSingleUse}; 6371 case SPF_SMAX: 6372 return {Intrinsic::smax, AllCmpSingleUse}; 6373 case SPF_UMAX: 6374 return {Intrinsic::umax, AllCmpSingleUse}; 6375 default: 6376 llvm_unreachable("unexpected select pattern flavor"); 6377 } 6378 } 6379 return {Intrinsic::not_intrinsic, false}; 6380 } 6381 6382 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, 6383 Value *&Start, Value *&Step) { 6384 // Handle the case of a simple two-predecessor recurrence PHI. 6385 // There's a lot more that could theoretically be done here, but 6386 // this is sufficient to catch some interesting cases. 
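  // For instance, a canonical counting loop (illustrative IR):
  //   %iv = phi i64 [ 0, %entry ], [ %iv.next, %backedge ]
  //   %iv.next = add i64 %iv, 1
  // matches with BO = %iv.next, Start = 0, and Step = 1.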
  if (P->getNumIncomingValues() != 2)
    return false;

  for (unsigned i = 0; i != 2; ++i) {
    Value *L = P->getIncomingValue(i);
    Value *R = P->getIncomingValue(!i);
    Operator *LU = dyn_cast<Operator>(L);
    if (!LU)
      continue;
    unsigned Opcode = LU->getOpcode();

    switch (Opcode) {
    default:
      continue;
    // TODO: Expand list -- xor, div, gep, uaddo, etc.
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::Shl:
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Mul: {
      Value *LL = LU->getOperand(0);
      Value *LR = LU->getOperand(1);
      // Find a recurrence.
      if (LL == P)
        L = LR;
      else if (LR == P)
        L = LL;
      else
        continue; // Check for recurrence with L and R flipped.

      break; // Match!
    }
    }

    // We have matched a recurrence of the form:
    //   %iv = phi [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop %iv, L
    // OR
    //   %iv = phi [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop L, %iv
    BO = cast<BinaryOperator>(LU);
    Start = R;
    Step = L;
    return true;
  }
  return false;
}

bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                 Value *&Start, Value *&Step) {
  BinaryOperator *BO = nullptr;
  P = dyn_cast<PHINode>(I->getOperand(0));
  if (!P)
    P = dyn_cast<PHINode>(I->getOperand(1));
  return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
}

/// Return true if "icmp Pred LHS, RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS, BRHS" is true whenever "icmp Pred
/// ALHS, ARHS" is true. Otherwise, return None.
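/// For example (an illustrative instance, not exhaustive): "%x s<= %y" being
/// true implies "%x s<= (%y +nsw 1)" is true, because isTruePredicate can
/// show both %x s<= %x and %y s<= %y +nsw 1.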
6508 static Optional<bool> 6509 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 6510 const Value *ARHS, const Value *BLHS, const Value *BRHS, 6511 const DataLayout &DL, unsigned Depth) { 6512 switch (Pred) { 6513 default: 6514 return None; 6515 6516 case CmpInst::ICMP_SLT: 6517 case CmpInst::ICMP_SLE: 6518 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && 6519 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) 6520 return true; 6521 return None; 6522 6523 case CmpInst::ICMP_ULT: 6524 case CmpInst::ICMP_ULE: 6525 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && 6526 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) 6527 return true; 6528 return None; 6529 } 6530 } 6531 6532 /// Return true if the operands of the two compares match. IsSwappedOps is true 6533 /// when the operands match, but are swapped. 6534 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 6535 const Value *BLHS, const Value *BRHS, 6536 bool &IsSwappedOps) { 6537 6538 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 6539 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 6540 return IsMatchingOps || IsSwappedOps; 6541 } 6542 6543 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true. 6544 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false. 6545 /// Otherwise, return None if we can't infer anything. 6546 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 6547 CmpInst::Predicate BPred, 6548 bool AreSwappedOps) { 6549 // Canonicalize the predicate as if the operands were not commuted. 6550 if (AreSwappedOps) 6551 BPred = ICmpInst::getSwappedPredicate(BPred); 6552 6553 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 6554 return true; 6555 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 6556 return false; 6557 6558 return None; 6559 } 6560 6561 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true. 6562 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false. 6563 /// Otherwise, return None if we can't infer anything. 6564 static Optional<bool> 6565 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, 6566 const ConstantInt *C1, 6567 CmpInst::Predicate BPred, 6568 const ConstantInt *C2) { 6569 ConstantRange DomCR = 6570 ConstantRange::makeExactICmpRegion(APred, C1->getValue()); 6571 ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue()); 6572 ConstantRange Intersection = DomCR.intersectWith(CR); 6573 ConstantRange Difference = DomCR.difference(CR); 6574 if (Intersection.isEmptySet()) 6575 return false; 6576 if (Difference.isEmptySet()) 6577 return true; 6578 return None; 6579 } 6580 6581 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is 6582 /// false. Otherwise, return None if we can't infer anything. 6583 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS, 6584 CmpInst::Predicate BPred, 6585 const Value *BLHS, const Value *BRHS, 6586 const DataLayout &DL, bool LHSIsTrue, 6587 unsigned Depth) { 6588 Value *ALHS = LHS->getOperand(0); 6589 Value *ARHS = LHS->getOperand(1); 6590 6591 // The rest of the logic assumes the LHS condition is true. If that's not the 6592 // case, invert the predicate to make it so. 6593 CmpInst::Predicate APred = 6594 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); 6595 6596 // Can we infer anything when the two compares have matching operands? 
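  // For instance (illustrative): if "%a s< %b" is true, then "%a s<= %b" is
  // necessarily true, and "%a s> %b" is necessarily false.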
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
/// instruction.
static Optional<bool>
isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // The LHS must be an 'or', 'and', or a 'select' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or ||
          LHS->getOpcode() == Instruction::Select) &&
         "Expected LHS to be 'and', 'or', or 'select'.");

  assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  const Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication = isImpliedCondition(
            ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication = isImpliedCondition(
            ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxAnalysisRecursionDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
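  // For example (illustrative IR):
  //   %lhs = icmp ugt i32 %x, 10
  //   %rhs = icmp ugt i32 %x, 5
  // If %lhs is true, isImpliedCondICmps can prove %rhs is true as well.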
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  if (LHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                              Depth);

  // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
  // the RHS to be an icmp.
  // FIXME: Add support for and/or/select on the RHS.
  if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
    if ((LHSI->getOpcode() == Instruction::And ||
         LHSI->getOpcode() == Instruction::Or ||
         LHSI->getOpcode() == Instruction::Select))
      return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                                Depth);
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (RHSCmp)
    return isImpliedCondition(LHS, RHSCmp->getPredicate(),
                              RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
                              LHSIsTrue, Depth);
  return None;
}

// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no condition is found.
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return {nullptr, false};

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(),
             m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return {nullptr, false};

  // A degenerate branch (both successors identical) should get simplified
  // away; don't bother analyzing its condition.
  if (TrueBB == FalseBB)
    return {nullptr, false};

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
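  // (Descriptive note: the implication itself is checked by the callers
  // below; here we only report the condition and the edge on which it is
  // known to hold. ContextBB is reached on the true edge exactly when it is
  // TrueBB.)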
6740 return {PredCond, TrueBB == ContextBB}; 6741 } 6742 6743 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond, 6744 const Instruction *ContextI, 6745 const DataLayout &DL) { 6746 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool"); 6747 auto PredCond = getDomPredecessorCondition(ContextI); 6748 if (PredCond.first) 6749 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second); 6750 return None; 6751 } 6752 6753 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred, 6754 const Value *LHS, const Value *RHS, 6755 const Instruction *ContextI, 6756 const DataLayout &DL) { 6757 auto PredCond = getDomPredecessorCondition(ContextI); 6758 if (PredCond.first) 6759 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL, 6760 PredCond.second); 6761 return None; 6762 } 6763 6764 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, 6765 APInt &Upper, const InstrInfoQuery &IIQ) { 6766 unsigned Width = Lower.getBitWidth(); 6767 const APInt *C; 6768 switch (BO.getOpcode()) { 6769 case Instruction::Add: 6770 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { 6771 // FIXME: If we have both nuw and nsw, we should reduce the range further. 6772 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) { 6773 // 'add nuw x, C' produces [C, UINT_MAX]. 6774 Lower = *C; 6775 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) { 6776 if (C->isNegative()) { 6777 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. 6778 Lower = APInt::getSignedMinValue(Width); 6779 Upper = APInt::getSignedMaxValue(Width) + *C + 1; 6780 } else { 6781 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. 6782 Lower = APInt::getSignedMinValue(Width) + *C; 6783 Upper = APInt::getSignedMaxValue(Width) + 1; 6784 } 6785 } 6786 } 6787 break; 6788 6789 case Instruction::And: 6790 if (match(BO.getOperand(1), m_APInt(C))) 6791 // 'and x, C' produces [0, C]. 6792 Upper = *C + 1; 6793 break; 6794 6795 case Instruction::Or: 6796 if (match(BO.getOperand(1), m_APInt(C))) 6797 // 'or x, C' produces [C, UINT_MAX]. 6798 Lower = *C; 6799 break; 6800 6801 case Instruction::AShr: 6802 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 6803 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. 6804 Lower = APInt::getSignedMinValue(Width).ashr(*C); 6805 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1; 6806 } else if (match(BO.getOperand(0), m_APInt(C))) { 6807 unsigned ShiftAmount = Width - 1; 6808 if (!C->isZero() && IIQ.isExact(&BO)) 6809 ShiftAmount = C->countTrailingZeros(); 6810 if (C->isNegative()) { 6811 // 'ashr C, x' produces [C, C >> (Width-1)] 6812 Lower = *C; 6813 Upper = C->ashr(ShiftAmount) + 1; 6814 } else { 6815 // 'ashr C, x' produces [C >> (Width-1), C] 6816 Lower = C->ashr(ShiftAmount); 6817 Upper = *C + 1; 6818 } 6819 } 6820 break; 6821 6822 case Instruction::LShr: 6823 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 6824 // 'lshr x, C' produces [0, UINT_MAX >> C]. 6825 Upper = APInt::getAllOnes(Width).lshr(*C) + 1; 6826 } else if (match(BO.getOperand(0), m_APInt(C))) { 6827 // 'lshr C, x' produces [C >> (Width-1), C]. 
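      // Worked example (values chosen for exposition): with C = 12 (0b1100)
      // and the 'exact' flag, ShiftAmount becomes countTrailingZeros(12) = 2,
      // giving the half-open range [3, 13).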
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
        Lower = *C;
        Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
      } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countLeadingOnes() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countLeadingZeros() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnes()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        // (INT_MIN / -1 would overflow, so it is excluded).
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countLeadingZeros() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        //    where C != -1 and C != 0 and C != 1
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
      Upper = *C;
    break;

  default:
    break;
  }
}

static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
                                  APInt &Upper) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    // Maximum of set/clear bits is the bit width.
    assert(Lower == 0 && "Expected lower bound to be zero");
    Upper = Width + 1;
    break;
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      Lower = *C;
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
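        // Worked example (illustrative): for i8 and C = -16, the range is
        // [-128, 112), i.e. saturation can reach -128 but the sum can never
        // exceed 127 - 16 = 111.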
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
      } else {
        // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) + *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      Upper = *C + 1;
    // usub.sat(x, C) produces [0, UINT_MAX - C].
    else if (match(II.getOperand(1), m_APInt(C)))
      Upper = APInt::getMaxValue(Width) - *C + 1;
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
        Lower = APInt::getSignedMinValue(Width);
        Upper = *C - APInt::getSignedMinValue(Width) + 1;
      } else {
        // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
        Lower = *C - APInt::getSignedMaxValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) - *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      } else {
        // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
      }
    }
    break;
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
    if (!match(II.getOperand(0), m_APInt(C)) &&
        !match(II.getOperand(1), m_APInt(C)))
      break;

    switch (II.getIntrinsicID()) {
    case Intrinsic::umin:
      Upper = *C + 1;
      break;
    case Intrinsic::umax:
      Lower = *C;
      break;
    case Intrinsic::smin:
      Lower = APInt::getSignedMinValue(Width);
      Upper = *C + 1;
      break;
    case Intrinsic::smax:
      Lower = *C;
      Upper = APInt::getSignedMaxValue(Width) + 1;
      break;
    default:
      llvm_unreachable("Must be min/max intrinsic");
    }
    break;
  case Intrinsic::abs:
    // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    if (match(II.getOperand(1), m_One()))
      Upper = APInt::getSignedMaxValue(Width) + 1;
    else
      Upper = APInt::getSignedMinValue(Width) + 1;
    break;
  default:
    break;
  }
}

static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper, const InstrInfoQuery &IIQ) {
  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    Lower = APInt::getZero(BitWidth);
    if (match(RHS, m_Neg(m_Specific(LHS))) &&
        IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
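    // e.g. (illustrative) for i8 this is the half-open range [-128, 1),
    // i.e. the inclusive range [-128, 0].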
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}

static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
  // The maximum representable value of a half is 65504. For floats the maximum
  // value is 3.4e38, which requires roughly 129 bits.
  unsigned BitWidth = I->getType()->getScalarSizeInBits();
  if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
    return;
  if (isa<FPToSIInst>(I) && BitWidth >= 17) {
    Lower = APInt(BitWidth, -65504);
    Upper = APInt(BitWidth, 65505);
  }

  if (isa<FPToUIInst>(I) && BitWidth >= 16) {
    // For an fptoui the lower limit is left as 0.
    Upper = APInt(BitWidth, 65505);
  }
}

ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
                                         AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  if (Depth == MaxAnalysisRecursionDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt Lower = APInt(BitWidth, 0);
  APInt Upper = APInt(BitWidth, 0);
  if (auto *BO = dyn_cast<BinaryOperator>(V))
    setLimitsForBinOp(*BO, Lower, Upper, IIQ);
  else if (auto *II = dyn_cast<IntrinsicInst>(V))
    setLimitsForIntrinsic(*II, Lower, Upper);
  else if (auto *SI = dyn_cast<SelectInst>(V))
    setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
  else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V))
    setLimitForFPToI(cast<Instruction>(V), Lower, Upper);

  ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, DT))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
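      // For instance (illustrative IR):
      //   call void @llvm.assume(i1 %cond) ; %cond = icmp ult i32 %v, 100
      // intersects CR with [0, 100) when the assume is valid at CtxI.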
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
                                               AC, I, DT, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;
}

static Optional<int64_t>
getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return None;
    if (OpC->isZero())
      continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or fixed-length
    // vector. Multiply the index by the ElementSize.
    TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
    if (Size.isScalable())
      return None;
    Offset += Size.getFixedSize() * OpC->getSExtValue();
  }

  return Offset;
}

Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                        const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2)
    return 0;

  const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  // If one pointer is a GEP, see if the GEP is a constant offset from the
  // base, as in "P" and "gep P, 1".
  // Also do this iteratively to handle the following case:
  //   Ptr_t1 = GEP Ptr1, c1
  //   Ptr_t2 = GEP Ptr_t1, c2
  //   Ptr2 = GEP Ptr_t2, c3
  // where we will return c1+c2+c3.
  // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
  // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
  // are the same, and return the difference between offsets.
  auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
                                 const Value *Ptr) -> Optional<int64_t> {
    const GEPOperator *GEP_T = GEP;
    int64_t OffsetVal = 0;
    bool HasSameBase = false;
    while (GEP_T) {
      auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
      if (!Offset)
        return None;
      OffsetVal += *Offset;
      auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
      if (Op0 == Ptr) {
        HasSameBase = true;
        break;
      }
      GEP_T = dyn_cast<GEPOperator>(Op0);
    }
    if (!HasSameBase)
      return None;
    return OffsetVal;
  };

  if (GEP1) {
    auto Offset = getOffsetFromBase(GEP1, Ptr2);
    if (Offset)
      return -*Offset;
  }
  if (GEP2) {
    auto Offset = getOffsetFromBase(GEP2, Ptr1);
    if (Offset)
      return Offset;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base. After that base, they may have some number of common (and
  // potentially variable) indices.
  // After the common indices, each may have a trailing constant offset,
  // which determines their offset from each other. At this point, we handle
  // no other cases.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return None;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
  auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
  if (!Offset1 || !Offset2)
    return None;
  return *Offset2 - *Offset1;
}
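// A usage sketch for isPointerOffset (illustrative IR, names hypothetical):
//   %p1 = getelementptr i8, i8* %base, i64 4
//   %p2 = getelementptr i8, i8* %base, i64 12
// isPointerOffset(%p1, %p2, DL) returns 8, i.e. the offset of Ptr2 relative
// to Ptr1.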