//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
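  // For example, the operands of (%a + %b) and (%b + %a) sort the same way
  // under this relation, so both reach the folder in a single order and
  // produce one uniqued SCEVAddExpr.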
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used
    // by one SCEV, so we can safely sort recurrences by loop header
    // dominance. We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LA->getNoWrapFlags() != RA->getNoWrapFlags())
      return (int)LA->getNoWrapFlags() - (int)RA->getNoWrapFlags();

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LC->getNoWrapFlags() != RC->getNoWrapFlags())
      return (int)LC->getNoWrapFlags() - (int)RC->getNoWrapFlags();

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
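      // (SCEVTraversal tracks visited nodes, so each distinct node in the
      // expression DAG is counted exactly once.)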
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // The simple case of N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following expression kinds, so the visitors
  // below are left with empty implementations.
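  // (They simply keep the "cannot divide" state established in the
  // constructor: Quotient = 0 and Remainder = Numerator.)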
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
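  // For example, for K = 4 and W = 32: K! = 24 = 2^3 * 3, so T = 3 and
  // OddFactorial = 3. The multiplicative inverse of 3 modulo 2^32 is
  // 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32), so multiplying by
  // 0xAAAAAAAB performs the exact division by 3.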
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
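  // (For example, (trunc i64 42 to i32) folds directly to the i32
  // constant 42.)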
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that recursion and other modifications have since added it to
    // the cache, so if we find it there, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that recursion and other modifications have since added it to
    // the cache, so if we find it there, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
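// For example, for an 8-bit recurrence whose Step is known positive with a
// signed-range maximum of 3, this returns SINT_MIN - 3, which wraps to 125,
// together with ICMP_SLT: while the recurrence stays (signed) below 125,
// adding at most 3 cannot exceed SINT_MAX == 127.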
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling.
This 1406 // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + 1407 // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the 1408 // expression "Step + sext/zext(PreIncAR)" is congruent with 1409 // "sext/zext(PostIncAR)" 1410 template <typename ExtendOpTy> 1411 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, 1412 ScalarEvolution *SE, unsigned Depth) { 1413 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1414 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1415 1416 const Loop *L = AR->getLoop(); 1417 const SCEV *Start = AR->getStart(); 1418 const SCEV *Step = AR->getStepRecurrence(*SE); 1419 1420 // Check for a simple looking step prior to loop entry. 1421 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); 1422 if (!SA) 1423 return nullptr; 1424 1425 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV 1426 // subtraction is expensive. For this purpose, perform a quick and dirty 1427 // difference, by checking for Step in the operand list. 1428 SmallVector<const SCEV *, 4> DiffOps; 1429 for (const SCEV *Op : SA->operands()) 1430 if (Op != Step) 1431 DiffOps.push_back(Op); 1432 1433 if (DiffOps.size() == SA->getNumOperands()) 1434 return nullptr; 1435 1436 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + 1437 // `Step`: 1438 1439 // 1. NSW/NUW flags on the step increment. 1440 auto PreStartFlags = 1441 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); 1442 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); 1443 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( 1444 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); 1445 1446 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies 1447 // "S+X does not sign/unsign-overflow". 1448 // 1449 1450 const SCEV *BECount = SE->getBackedgeTakenCount(L); 1451 if (PreAR && PreAR->getNoWrapFlags(WrapType) && 1452 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) 1453 return PreStart; 1454 1455 // 2. Direct overflow check on the step operation's expression. 1456 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); 1457 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); 1458 const SCEV *OperandExtendedStart = 1459 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), 1460 (SE->*GetExtendExpr)(Step, WideTy, Depth)); 1461 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { 1462 if (PreAR && AR->getNoWrapFlags(WrapType)) { 1463 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW 1464 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then 1465 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. 1466 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType); 1467 } 1468 return PreStart; 1469 } 1470 1471 // 3. Loop precondition. 1472 ICmpInst::Predicate Pred; 1473 const SCEV *OverflowLimit = 1474 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); 1475 1476 if (OverflowLimit && 1477 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) 1478 return PreStart; 1479 1480 return nullptr; 1481 } 1482 1483 // Get the normalized zero or sign extended expression for this AddRec's Start. 
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//   {S,+,X} == {S-T,+,X} + T
//    => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
// If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
// If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
// If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
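    // (FindNodeOrInsertPos above only performs a lookup; it never constructs
    // the recurrence itself.)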
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
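      // (For example, when a loop's exit condition is too complex for SCEV
      // to analyze, an llvm.assume or guard intrinsic that bounds the
      // induction variable can still prove the addrec does not wrap, even
      // though no backedge-taken count is computable.)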
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value, the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->getNumOperands() == 2) {
      auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
      auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
      if (SMul && SC1) {
        if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
          const APInt &C1 = SC1->getAPInt();
          const APInt &C2 = SC2->getAPInt();
          if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
              C2.ugt(C1) && C2.isPowerOf2())
            return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
                              getSignExtendExpr(SMul, Ty, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
        }
      }
    }

    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller value, we can sign extend all of the
  // operands (often constants).
  // This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            //    => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value, the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // If Start and Step are constants, check if we can apply this
      // transformation:
      //   sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
      auto *SC1 = dyn_cast<SCEVConstant>(Start);
      auto *SC2 = dyn_cast<SCEVConstant>(Step);
      if (SC1 && SC2) {
        const APInt &C1 = SC1->getAPInt();
        const APInt &C2 = SC2->getAPInt();
        if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
            C2.isPowerOf2()) {
          Start = getSignExtendExpr(Start, Ty, Depth + 1);
          const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
                                            AR->getNoWrapFlags());
          return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
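/// As a concrete instance of the example above, taking A = 2 and B = 3 gives
/// the map entries (m, 7), (n, 1), (o, 2), (p, 2), (q, 6), (r, 0) and adds
/// 13 + 6*29 = 187 to AccumulatedConstant.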
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const SmallVectorImpl<const SCEV *> &Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
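  // (Reasoning sketch: if every operand is non-negative and the operation is
  // <nsw>, the exact result lies in [0, SINT_MAX], which is contained in the
  // unsigned range, so the operation cannot wrap in the unsigned sense
  // either.)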
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
      Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {

    // (A + C) --> (A + C)<nsw> if the addition does not sign overflow
    // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit the depth of recursive calls.
  if (Depth > MaxArithDepth)
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
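  // For example, once (x + y + y + z) has been sorted, the two copies of y
  // are adjacent and get merged below into x + y*2 + z.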
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded, e.g., n*trunc(x) + m*trunc(y) --> trunc(ext(n)*x + ext(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted. Recurse to re-sort and re-simplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRecs with the same loop induction variable being
    // added together. If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExprs to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i, we multiply the running result by the i-th term of the
  // numerator, n-(i-1), and divide by the i-th term of the denominator, i.
  // Each such division produces an integral result, which helps reduce the
  // chance of overflow in the intermediate computations. However, we can
  // still overflow even when the final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
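/// For example, constant operands such as 2 and 3 in (2 * x * 3) are folded
/// into the single constant 6, a leading multiply by 1 is stripped, and a
/// multiply by 0 folds directly to 0.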
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit the depth of recursive calls.
  if (Depth > MaxArithDepth)
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant,
        // apply this transformation as well.
        if (Add->getNumOperands() == 2)
          // TODO: There are some cases where this transformation is not
          // profitable, for example:
          // Add = (C0 + X) * Y + Z.
          // Maybe the scope of this transformation should be narrowed down.
          if (containsConstantInAddMulChain(Add))
            return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                         SCEV::FlagAnyWrap, Depth + 1),
                              getMulExpr(LHSC, Add->getOperand(1),
                                         SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to re-sort and
    // re-simplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // The no-self-wrap property (NW) cannot be guaranteed after changing
      // the step size, but it will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRecs with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    //   {A_0,+,A_1,+,...,+,A_{n-1}}<L> * {B_0,+,B_1,+,...,+,B_{m-1}}<L>
    // multiplies out to an AddRec {C_0,+,C_1,+,...,+,C_{n+m-2}}<L> whose x-th
    // operand is
    //
    //   C_x = sum (y = x to 2x) (sum (z = max(y-x, y-n+1) to min(x, m-1)) (
    //           Choose(x, 2x-y) * Choose(2x-y, x-z) * A_{y-z} * B_z))
    //
    // as computed by the nested loops below. Note that the arguments to
    // Choose() are always integers with values known at compile time, never
    // SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrecs are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize)
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        const SCEV *Term = getZero(Ty);
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
                                               SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
          }
        }
        AddRecOps.push_back(Term);
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
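/// For example, x urem 1 folds to 0, and for a power-of-two divisor such as 8
/// the expression folds to a zext of an i3 truncate of x; other cases are
/// rewritten below in terms of udiv, mul, and sub.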
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general rewrite:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of LHS.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          //   {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
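          // For example (a sketch): {5,+,2}/4 becomes {4,+,2}/4. Since
          // 4 % 2 == 0, subtracting 5 % 2 == 1 from the start can never move
          // a value of the recurrence across a multiple of 4, so every
          // iteration's quotient is unchanged.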
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }
      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
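/// For example (a sketch; %x and %y are hypothetical): an exact
/// (6 * %x) /u 4, where the multiply is known not to wrap unsigned, can be
/// narrowed to (3 * %x) /u 2 by dividing both sides by gcd(6, 4) == 2, and
/// an exact (%y * %x) /u %x folds to %y outright.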
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
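/// As a reminder of the semantics (a sketch, following the
/// chains-of-recurrences references in the file header): {X,+,Y,+,Z}
/// evaluated at the n-th iteration is X + Y*n + Z*(n*(n-1)/2), i.e.
/// operand i is the coefficient of the binomial term choose(n, i).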
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and use that
  // information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break
      // this requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if
        // the inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
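  // Note that setNoWrapFlags below runs whether the node was found in the
  // FoldingSet or newly created, so flags proven on this query are also
  // attached to a previously cached addrec.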
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do on CurTy is to get
  // its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
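  // For example (a sketch; %a, %i and %j are hypothetical): for
  //   getelementptr [10 x i32], [10 x i32]* %a, i64 %i, i64 %j
  // the loop above accumulates %i * 40 + %j * 4 (sizeof([10 x i32]) == 40,
  // sizeof(i32) == 4), and the result below is %a plus that total offset.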
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one. Since we sorted the list, these values are required
  // to be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X smax Y smax Y  -->  X smax Y
    // X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.
  // If so, delete one. Since we sorted the list, these values are required
  // to be adjacent.
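  // For example (a sketch; %a and %b are hypothetical): umax(%a, %a, %b)
  // becomes umax(%a, %b), and if %a is known to be uge %b, umax(%a, %b)
  // folds to plain %a.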
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X umax Y umax Y  -->  X umax Y
    // X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y).
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which
/// represents how SCEV will treat the given type, for which isSCEVable must
/// return true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately; eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether the value has nuw/nsw/exact set but its SCEV does not.
/// TODO: In reality it is better to check for poison recursively, but this
/// is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression
/// and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
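      // For example (a sketch; %i is hypothetical): if V computes
      // ((4 * %i) + 4), we additionally record (4 * %i) -> {V, 4}, so an
      // expansion of (4 * %i) can reuse "V - 4" instead of emitting fresh
      // arithmetic.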
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V.
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V.
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN,
                                         const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change
      // anything. In the second case, createNodeForPHI will perform the
      // necessary updates on its own when it gets to that point. In the
      // third, we do want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its Loop is L. If the loop is not L and IgnoreOtherLoops is
/// true, use the AddRec itself; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its Loop is L. If it is not L, use the
/// AddRec itself.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
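/// For example (a sketch; %c, %x, %y are hypothetical): if the latch ends in
///   br i1 %c, label %header, label %exit
/// then a loop-variant "select i1 %c, %x, %y" can be folded to %x while
/// building SCEV nodes, because %c must be true whenever the backedge is
/// taken.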
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target the same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop backedge condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if the backedge is taken on the positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition of the loop latch, then
  // return a constant evolution node based on the backedge branch being
  // taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only loop-invariant SCEVUnknowns survive the shift unchanged.
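    // A loop-invariant value reads the same in the shifted iteration space,
    // so it may be kept as-is; e.g. (a sketch, with %inv hypothetical and
    // invariant in L) the rewriter maps %inv + {0,+,1}<L> to
    // %inv + {-1,+,1}<L>.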
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)),
        RHS(Op->getOperand(1)), Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS,
                    bool IsNSW = false, bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid
  // creating SCEV expressions when possible, and we should not break that.
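  // For example (a sketch): an i32 "xor %x, 0x80000000" is mapped to an add
  // of the sign mask, and an "lshr %x, 3" to a udiv by 8, as the cases below
  // spell out; both are plain value-level pattern matches.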

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0),
                        Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component
        // of CI are guarded by the overflow check, we can go ahead and
        // pretend that the arithmetic is non-overflowing.
        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // The same reasoning as sadd/uadd above.
        if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
                        CI->getArgOperand(1));
      default:
        break;
      }
    break;
  }

  default:
    break;
  }

  return None;
}

/// Helper function for createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//    (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//    (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
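  // (E.g., an illustrative header phi
  //    %x = phi i64 [ %a, %entry1 ], [ %a, %entry2 ], [ %n, %latch ]
  //  still qualifies: the entry value %a and the backedge value %n are each
  //  unique even though the phi has three incoming edges.)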
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                             :: from step i
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //     + Accum                                                 :: from P3
  //
  //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //     + Accum                             :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n.

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
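  // (A cached failure is encoded below as the pair {SymbolicPHI, {}}, while
  // a cached success pairs the new AddRec with its predicates.)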
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
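  // (The (void) call below exists only for its caching side effect: it
  // attaches Flags to the post-increment expression {Start+Accum,+,Accum}.)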
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
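      // (E.g., a step that is itself the addrec {0,+,1} in this loop is
      // still accepted; the resulting recurrence is effectively quadratic
      // in the iteration number.)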
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this, saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary
  // as it will prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
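//
// For instance (illustrative), a diamond that branches on %c and merges at
//   %m = phi [ %x, %left ], [ %y, %right ]
// is reported back through C/LHS/RHS as the select "%c ? %x : %y".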
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
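  // (E.g., illustratively, "%c = icmp sgt i32 %a, %b" feeding
  // "select i1 %c, i32 %a, i32 %b" is recognized below as smax(%a, %b),
  // along with the "+x" variants.)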
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV.  If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
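  // (E.g., illustratively: for an 8-bit S with two known trailing zero bits,
  // the unsigned case below narrows the range to [0, 0xFD), whose largest
  // member 0xFC still ends in two zero bits.)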
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRangeRef(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)));

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
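    // (E.g., an nsw recurrence such as {1,+,2} has all-non-negative operands,
    // so below it is pinned to the non-negative half-range.)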
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt(BitWidth, 0),
                          APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth),
                          APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
    // if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known =
          computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(ConstantRange(Known.One,
                                                           ~Known.Zero + 1));
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
    }

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. The
// Signed argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise.
  // Maximum value of the final range will match the maximal value of
  // StartRange if the expression is decreasing and will be increased by
  // Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
  if (NewLower == NewUpper)
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
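        // (I.e., today we only peel a two-operand add whose first operand is
        // a constant; anything else is left alone by the guard below.)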
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange(BitWidth, /* isFullSet = */ true);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here.  This function is
  // called from deep in the call stack, and calling getSCEV (on a sext
  // instruction, say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop
  // we need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check we can do cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfFullPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison.  There are two possibilities (call the iteration in which \p I
  // first became poison K):
  //
  //  1. In the set of iterations including and after K, the loop body executes
  //     no side effects.  In this case executing the backedge an infinite
  //     number of times will yield undefined behavior.
  //
  //  2. In the set of iterations including and after K, the loop body executes
  //     at least one side effect.  In this case, that specific instance of
  //     side effect is control dependent on poison, which also yields
  //     undefined behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /* HasNoSideEffects */ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
5927 } 5928 5929 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5930 assert(InsertPair.second && "We just checked!"); 5931 Itr = InsertPair.first; 5932 } 5933 5934 return Itr->second; 5935 } 5936 5937 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5938 if (!isSCEVable(V->getType())) 5939 return getUnknown(V); 5940 5941 if (Instruction *I = dyn_cast<Instruction>(V)) { 5942 // Don't attempt to analyze instructions in blocks that aren't 5943 // reachable. Such instructions don't matter, and they aren't required 5944 // to obey basic rules for definitions dominating uses which this 5945 // analysis depends on. 5946 if (!DT.isReachableFromEntry(I->getParent())) 5947 return getUnknown(V); 5948 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5949 return getConstant(CI); 5950 else if (isa<ConstantPointerNull>(V)) 5951 return getZero(V->getType()); 5952 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5953 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5954 else if (!isa<ConstantExpr>(V)) 5955 return getUnknown(V); 5956 5957 Operator *U = cast<Operator>(V); 5958 if (auto BO = MatchBinaryOp(U, DT)) { 5959 switch (BO->Opcode) { 5960 case Instruction::Add: { 5961 // The simple thing to do would be to just call getSCEV on both operands 5962 // and call getAddExpr with the result. However if we're looking at a 5963 // bunch of things all added together, this can be quite inefficient, 5964 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5965 // Instead, gather up all the operands and make a single getAddExpr call. 5966 // LLVM IR canonical form means we need only traverse the left operands. 5967 SmallVector<const SCEV *, 4> AddOps; 5968 do { 5969 if (BO->Op) { 5970 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5971 AddOps.push_back(OpSCEV); 5972 break; 5973 } 5974 5975 // If a NUW or NSW flag can be applied to the SCEV for this 5976 // addition, then compute the SCEV for this addition by itself 5977 // with a separate call to getAddExpr. We need to do that 5978 // instead of pushing the operands of the addition onto AddOps, 5979 // since the flags are only known to apply to this particular 5980 // addition - they may not apply to other additions that can be 5981 // formed with operands from AddOps. 
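// (Hypothetical instance, added for exposition: given ((a +nsw b) + c), the
//  NSW on the inner add licenses nothing about (a + c) or (b + c), which are
//  additions that could be formed once a, b, and c all sit in AddOps.)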
5982 const SCEV *RHS = getSCEV(BO->RHS); 5983 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5984 if (Flags != SCEV::FlagAnyWrap) { 5985 const SCEV *LHS = getSCEV(BO->LHS); 5986 if (BO->Opcode == Instruction::Sub) 5987 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5988 else 5989 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5990 break; 5991 } 5992 } 5993 5994 if (BO->Opcode == Instruction::Sub) 5995 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5996 else 5997 AddOps.push_back(getSCEV(BO->RHS)); 5998 5999 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6000 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6001 NewBO->Opcode != Instruction::Sub)) { 6002 AddOps.push_back(getSCEV(BO->LHS)); 6003 break; 6004 } 6005 BO = NewBO; 6006 } while (true); 6007 6008 return getAddExpr(AddOps); 6009 } 6010 6011 case Instruction::Mul: { 6012 SmallVector<const SCEV *, 4> MulOps; 6013 do { 6014 if (BO->Op) { 6015 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6016 MulOps.push_back(OpSCEV); 6017 break; 6018 } 6019 6020 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6021 if (Flags != SCEV::FlagAnyWrap) { 6022 MulOps.push_back( 6023 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6024 break; 6025 } 6026 } 6027 6028 MulOps.push_back(getSCEV(BO->RHS)); 6029 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6030 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6031 MulOps.push_back(getSCEV(BO->LHS)); 6032 break; 6033 } 6034 BO = NewBO; 6035 } while (true); 6036 6037 return getMulExpr(MulOps); 6038 } 6039 case Instruction::UDiv: 6040 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6041 case Instruction::URem: 6042 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6043 case Instruction::Sub: { 6044 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6045 if (BO->Op) 6046 Flags = getNoWrapFlagsFromUB(BO->Op); 6047 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6048 } 6049 case Instruction::And: 6050 // For an expression like x&255 that merely masks off the high bits, 6051 // use zext(trunc(x)) as the SCEV expression. 6052 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6053 if (CI->isZero()) 6054 return getSCEV(BO->RHS); 6055 if (CI->isMinusOne()) 6056 return getSCEV(BO->LHS); 6057 const APInt &A = CI->getValue(); 6058 6059 // Instcombine's ShrinkDemandedConstant may strip bits out of 6060 // constants, obscuring what would otherwise be a low-bits mask. 6061 // Use computeKnownBits to compute what ShrinkDemandedConstant 6062 // knew about to reconstruct a low-bits mask value. 6063 unsigned LZ = A.countLeadingZeros(); 6064 unsigned TZ = A.countTrailingZeros(); 6065 unsigned BitWidth = A.getBitWidth(); 6066 KnownBits Known(BitWidth); 6067 computeKnownBits(BO->LHS, Known, getDataLayout(), 6068 0, &AC, nullptr, &DT); 6069 6070 APInt EffectiveMask = 6071 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6072 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6073 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6074 const SCEV *LHS = getSCEV(BO->LHS); 6075 const SCEV *ShiftedLHS = nullptr; 6076 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6077 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6078 // For an expression like (x * 8) & 8, simplify the multiply. 
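// (Worked example, added for exposition: for ((x * 8) & 8) on i32, A = 8
//  gives LZ = 28 and TZ = 3; below, MulZeros = 3, GCD = 3, DivAmt = 1, and
//  NewMul folds to x, so the whole expression becomes
//  (zext (trunc x to i1) to i32) * 8.)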
6079 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6080 unsigned GCD = std::min(MulZeros, TZ); 6081 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6082 SmallVector<const SCEV*, 4> MulOps; 6083 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6084 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6085 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6086 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6087 } 6088 } 6089 if (!ShiftedLHS) 6090 ShiftedLHS = getUDivExpr(LHS, MulCount); 6091 return getMulExpr( 6092 getZeroExtendExpr( 6093 getTruncateExpr(ShiftedLHS, 6094 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6095 BO->LHS->getType()), 6096 MulCount); 6097 } 6098 } 6099 break; 6100 6101 case Instruction::Or: 6102 // If the RHS of the Or is a constant, we may have something like: 6103 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6104 // optimizations will transparently handle this case. 6105 // 6106 // In order for this transformation to be safe, the LHS must be of the 6107 // form X*(2^n) and the Or constant must be less than 2^n. 6108 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6109 const SCEV *LHS = getSCEV(BO->LHS); 6110 const APInt &CIVal = CI->getValue(); 6111 if (GetMinTrailingZeros(LHS) >= 6112 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6113 // Build a plain add SCEV. 6114 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6115 // If the LHS of the add was an addrec and it has no-wrap flags, 6116 // transfer the no-wrap flags, since an or won't introduce a wrap. 6117 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6118 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6119 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6120 OldAR->getNoWrapFlags()); 6121 } 6122 return S; 6123 } 6124 } 6125 break; 6126 6127 case Instruction::Xor: 6128 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6129 // If the RHS of xor is -1, then this is a not operation. 6130 if (CI->isMinusOne()) 6131 return getNotSCEV(getSCEV(BO->LHS)); 6132 6133 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6134 // This is a variant of the check for xor with -1, and it handles 6135 // the case where instcombine has trimmed non-demanded bits out 6136 // of an xor with -1. 6137 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6138 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6139 if (LBO->getOpcode() == Instruction::And && 6140 LCI->getValue() == CI->getValue()) 6141 if (const SCEVZeroExtendExpr *Z = 6142 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6143 Type *UTy = BO->LHS->getType(); 6144 const SCEV *Z0 = Z->getOperand(); 6145 Type *Z0Ty = Z0->getType(); 6146 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6147 6148 // If C is a low-bits mask, the zero extend is serving to 6149 // mask off the high bits. Complement the operand and 6150 // re-apply the zext. 6151 if (CI->getValue().isMask(Z0TySize)) 6152 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6153 6154 // If C is a single bit, it may be in the sign-bit position 6155 // before the zero-extend. In this case, represent the xor 6156 // using an add, which is equivalent, and re-apply the zext. 
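// (Expository note: the rewrite below relies on the identity
//  x ^ SignMask == x + SignMask in modular arithmetic; e.g. in i8,
//  x ^ 0x80 == x + 0x80 (mod 256), so the xor can be modeled as an add
//  performed before the zero-extend.)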
6157 APInt Trunc = CI->getValue().trunc(Z0TySize);
6158 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6159 Trunc.isSignMask())
6160 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6161 UTy);
6162 }
6163 }
6164 break;
6165
6166 case Instruction::Shl:
6167 // Turn shift left of a constant amount into a multiply.
6168 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6169 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6170
6171 // If the shift count is not less than the bitwidth, the result of
6172 // the shift is undefined. Don't try to analyze it, because the
6173 // resolution chosen here may differ from the resolution chosen in
6174 // other parts of the compiler.
6175 if (SA->getValue().uge(BitWidth))
6176 break;
6177
6178 // It is currently not resolved how to interpret NSW for left
6179 // shift by BitWidth - 1, so we avoid applying flags in that
6180 // case. Remove this check (or this comment) once the situation
6181 // is resolved. See
6182 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
6183 // and http://reviews.llvm.org/D8890 .
6184 auto Flags = SCEV::FlagAnyWrap;
6185 if (BO->Op && SA->getValue().ult(BitWidth - 1))
6186 Flags = getNoWrapFlagsFromUB(BO->Op);
6187
6188 Constant *X = ConstantInt::get(getContext(),
6189 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6190 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6191 }
6192 break;
6193
6194 case Instruction::AShr: {
6195 // AShr X, C, where C is a constant.
6196 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6197 if (!CI)
6198 break;
6199
6200 Type *OuterTy = BO->LHS->getType();
6201 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6202 // If the shift count is not less than the bitwidth, the result of
6203 // the shift is undefined. Don't try to analyze it, because the
6204 // resolution chosen here may differ from the resolution chosen in
6205 // other parts of the compiler.
6206 if (CI->getValue().uge(BitWidth))
6207 break;
6208
6209 if (CI->isZero())
6210 return getSCEV(BO->LHS); // shift by zero --> noop
6211
6212 uint64_t AShrAmt = CI->getZExtValue();
6213 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6214
6215 Operator *L = dyn_cast<Operator>(BO->LHS);
6216 if (L && L->getOpcode() == Instruction::Shl) {
6217 // X = Shl A, n
6218 // Y = AShr X, m
6219 // Both n and m are constant.
6220
6221 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6222 if (L->getOperand(1) == BO->RHS)
6223 // For a two-shift sext-inreg, i.e. n = m,
6224 // use sext(trunc(x)) as the SCEV expression.
6225 return getSignExtendExpr(
6226 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6227
6228 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6229 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6230 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6231 if (ShlAmt > AShrAmt) {
6232 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6233 // expression. We already checked that ShlAmt < BitWidth, so
6234 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
6235 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
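// (Worked example, added for exposition: on i32 with n = ShlAmt = 4 and
//  m = AShrAmt = 2, TruncTy is i30 and the multiplier below is 1 << 2, so
//  ((x shl 4) ashr 2) is modeled as (sext (4 * (trunc x to i30)) to i32).)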
6236 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6237 ShlAmt - AShrAmt);
6238 return getSignExtendExpr(
6239 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6240 getConstant(Mul)), OuterTy);
6241 }
6242 }
6243 }
6244 break;
6245 }
6246 }
6247 }
6248
6249 switch (U->getOpcode()) {
6250 case Instruction::Trunc:
6251 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6252
6253 case Instruction::ZExt:
6254 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6255
6256 case Instruction::SExt:
6257 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6258 // The NSW flag of a subtract does not always survive the conversion to
6259 // A + (-1)*B. By pushing sign extension onto its operands we are much
6260 // more likely to preserve NSW and allow later AddRec optimizations.
6261 //
6262 // NOTE: This is effectively duplicating this logic from getSignExtend:
6263 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6264 // but by that point the NSW information has potentially been lost.
6265 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6266 Type *Ty = U->getType();
6267 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6268 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6269 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6270 }
6271 }
6272 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6273
6274 case Instruction::BitCast:
6275 // BitCasts are no-op casts so we just eliminate the cast.
6276 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6277 return getSCEV(U->getOperand(0));
6278 break;
6279
6280 // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
6281 // lead to pointer expressions which cannot safely be expanded to GEPs,
6282 // because ScalarEvolution doesn't respect the GEP aliasing rules when
6283 // simplifying integer expressions.
6284
6285 case Instruction::GetElementPtr:
6286 return createNodeForGEP(cast<GEPOperator>(U));
6287
6288 case Instruction::PHI:
6289 return createNodeForPHI(cast<PHINode>(U));
6290
6291 case Instruction::Select:
6292 // U can also be a select constant expr, which we let fall through. Since
6293 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
6294 // constant expressions cannot have instructions as operands, we'd have
6295 // returned getUnknown for a select constant expression anyway.
6296 if (isa<Instruction>(U))
6297 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6298 U->getOperand(1), U->getOperand(2));
6299 break;
6300
6301 case Instruction::Call:
6302 case Instruction::Invoke:
6303 if (Value *RV = CallSite(U).getReturnedArgOperand())
6304 return getSCEV(RV);
6305 break;
6306 }
6307
6308 return getUnknown(V);
6309 }
6310
6311 //===----------------------------------------------------------------------===//
6312 // Iteration Count Computation Code
6313 //
6314
6315 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6316 if (!ExitCount)
6317 return 0;
6318
6319 ConstantInt *ExitConst = ExitCount->getValue();
6320
6321 // Guard against huge trip counts.
6322 if (ExitConst->getValue().getActiveBits() > 32)
6323 return 0;
6324
6325 // In case of integer overflow, this returns 0, which is correct.
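// (Expository example: a backedge-taken count of 0xFFFFFFFF has exactly 32
//  active bits, so it passes the guard above; the unsigned +1 below then
//  wraps to 0, i.e. "no constant trip count known", which is the safe
//  answer.)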
6326 return ((unsigned)ExitConst->getZExtValue()) + 1;
6327 }
6328
6329 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6330 if (BasicBlock *ExitingBB = L->getExitingBlock())
6331 return getSmallConstantTripCount(L, ExitingBB);
6332
6333 // No trip count information for multiple exits.
6334 return 0;
6335 }
6336
6337 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6338 BasicBlock *ExitingBlock) {
6339 assert(ExitingBlock && "Must pass a non-null exiting block!");
6340 assert(L->isLoopExiting(ExitingBlock) &&
6341 "Exiting block must actually branch out of the loop!");
6342 const SCEVConstant *ExitCount =
6343 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6344 return getConstantTripCount(ExitCount);
6345 }
6346
6347 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6348 const auto *MaxExitCount =
6349 dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
6350 return getConstantTripCount(MaxExitCount);
6351 }
6352
6353 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6354 if (BasicBlock *ExitingBB = L->getExitingBlock())
6355 return getSmallConstantTripMultiple(L, ExitingBB);
6356
6357 // No trip multiple information for multiple exits.
6358 return 0;
6359 }
6360
6361 /// Returns the largest constant divisor of the trip count of this loop as a
6362 /// normal unsigned value, if possible. This means that the actual trip count is
6363 /// always a multiple of the returned value (don't forget the trip count could
6364 /// very well be zero as well!).
6365 ///
6366 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
6367 /// of a constant. (This is also the case if the trip count is simply constant;
6368 /// use getSmallConstantTripCount for that case.) It will also return 1 if the
6369 /// trip count is very large (>= 2^32).
6370 ///
6371 /// As explained in the comments for getSmallConstantTripCount, this assumes
6372 /// that control exits the loop via ExitingBlock.
6373 unsigned
6374 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6375 BasicBlock *ExitingBlock) {
6376 assert(ExitingBlock && "Must pass a non-null exiting block!");
6377 assert(L->isLoopExiting(ExitingBlock) &&
6378 "Exiting block must actually branch out of the loop!");
6379 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6380 if (ExitCount == getCouldNotCompute())
6381 return 1;
6382
6383 // Get the trip count from the BE count by adding 1.
6384 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6385
6386 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
6387 if (!TC)
6388 // Attempt to factor more general cases. Returns the greatest power-of-two
6389 // divisor. If overflow happens, the trip count expression is still
6390 // divisible by the greatest power-of-two divisor returned.
6391 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6392
6393 ConstantInt *Result = TC->getValue();
6394
6395 // Guard against huge trip counts (this requires checking
6396 // for zero to handle the case where the trip count == -1 and the
6397 // addition wraps).
6398 if (!Result || Result->getValue().getActiveBits() > 32 ||
6399 Result->getValue().getActiveBits() == 0)
6400 return 1;
6401
6402 return (unsigned)Result->getZExtValue();
6403 }
6404
6405 /// Get the expression for the number of loop iterations for which this loop is
6406 /// guaranteed not to exit via ExitingBlock. Otherwise return
6407 /// SCEVCouldNotCompute.
6408 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6409 BasicBlock *ExitingBlock) { 6410 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6411 } 6412 6413 const SCEV * 6414 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6415 SCEVUnionPredicate &Preds) { 6416 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 6417 } 6418 6419 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6420 return getBackedgeTakenInfo(L).getExact(L, this); 6421 } 6422 6423 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6424 /// known never to be less than the actual backedge taken count. 6425 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6426 return getBackedgeTakenInfo(L).getMax(this); 6427 } 6428 6429 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6430 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6431 } 6432 6433 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6434 static void 6435 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6436 BasicBlock *Header = L->getHeader(); 6437 6438 // Push all Loop-header PHIs onto the Worklist stack. 6439 for (PHINode &PN : Header->phis()) 6440 Worklist.push_back(&PN); 6441 } 6442 6443 const ScalarEvolution::BackedgeTakenInfo & 6444 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6445 auto &BTI = getBackedgeTakenInfo(L); 6446 if (BTI.hasFullInfo()) 6447 return BTI; 6448 6449 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6450 6451 if (!Pair.second) 6452 return Pair.first->second; 6453 6454 BackedgeTakenInfo Result = 6455 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6456 6457 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6458 } 6459 6460 const ScalarEvolution::BackedgeTakenInfo & 6461 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6462 // Initially insert an invalid entry for this loop. If the insertion 6463 // succeeds, proceed to actually compute a backedge-taken count and 6464 // update the value. The temporary CouldNotCompute value tells SCEV 6465 // code elsewhere that it shouldn't attempt to request a new 6466 // backedge-taken count, which could result in infinite recursion. 6467 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6468 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6469 if (!Pair.second) 6470 return Pair.first->second; 6471 6472 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6473 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6474 // must be cleared in this scope. 6475 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6476 6477 if (Result.getExact(L, this) != getCouldNotCompute()) { 6478 assert(isLoopInvariant(Result.getExact(L, this), L) && 6479 isLoopInvariant(Result.getMax(this), L) && 6480 "Computed backedge-taken count isn't loop invariant for loop!"); 6481 ++NumTripCountsComputed; 6482 } 6483 else if (Result.getMax(this) == getCouldNotCompute() && 6484 isa<PHINode>(L->getHeader()->begin())) { 6485 // Only count loops that have phi nodes as not being computable. 6486 ++NumTripCountsNotComputed; 6487 } 6488 6489 // Now that we know more about the trip count for this loop, forget any 6490 // existing SCEV values for PHI nodes in this loop since they are only 6491 // conservative estimates made without the benefit of trip count 6492 // information. 
This is similar to the code in forgetLoop, except that 6493 // it handles SCEVUnknown PHI nodes specially. 6494 if (Result.hasAnyInfo()) { 6495 SmallVector<Instruction *, 16> Worklist; 6496 PushLoopPHIs(L, Worklist); 6497 6498 SmallPtrSet<Instruction *, 8> Discovered; 6499 while (!Worklist.empty()) { 6500 Instruction *I = Worklist.pop_back_val(); 6501 6502 ValueExprMapType::iterator It = 6503 ValueExprMap.find_as(static_cast<Value *>(I)); 6504 if (It != ValueExprMap.end()) { 6505 const SCEV *Old = It->second; 6506 6507 // SCEVUnknown for a PHI either means that it has an unrecognized 6508 // structure, or it's a PHI that's in the progress of being computed 6509 // by createNodeForPHI. In the former case, additional loop trip 6510 // count information isn't going to change anything. In the later 6511 // case, createNodeForPHI will perform the necessary updates on its 6512 // own when it gets to that point. 6513 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 6514 eraseValueFromMap(It->first); 6515 forgetMemoizedResults(Old); 6516 } 6517 if (PHINode *PN = dyn_cast<PHINode>(I)) 6518 ConstantEvolutionLoopExitValue.erase(PN); 6519 } 6520 6521 // Since we don't need to invalidate anything for correctness and we're 6522 // only invalidating to make SCEV's results more precise, we get to stop 6523 // early to avoid invalidating too much. This is especially important in 6524 // cases like: 6525 // 6526 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node 6527 // loop0: 6528 // %pn0 = phi 6529 // ... 6530 // loop1: 6531 // %pn1 = phi 6532 // ... 6533 // 6534 // where both loop0 and loop1's backedge taken count uses the SCEV 6535 // expression for %v. If we don't have the early stop below then in cases 6536 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip 6537 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip 6538 // count for loop1, effectively nullifying SCEV's trip count cache. 6539 for (auto *U : I->users()) 6540 if (auto *I = dyn_cast<Instruction>(U)) { 6541 auto *LoopForUser = LI.getLoopFor(I->getParent()); 6542 if (LoopForUser && L->contains(LoopForUser) && 6543 Discovered.insert(I).second) 6544 Worklist.push_back(I); 6545 } 6546 } 6547 } 6548 6549 // Re-lookup the insert position, since the call to 6550 // computeBackedgeTakenCount above could result in a 6551 // recusive call to getBackedgeTakenInfo (on a different 6552 // loop), which would invalidate the iterator computed 6553 // earlier. 6554 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6555 } 6556 6557 void ScalarEvolution::forgetLoop(const Loop *L) { 6558 // Drop any stored trip count value. 6559 auto RemoveLoopFromBackedgeMap = 6560 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6561 auto BTCPos = Map.find(L); 6562 if (BTCPos != Map.end()) { 6563 BTCPos->second.clear(); 6564 Map.erase(BTCPos); 6565 } 6566 }; 6567 6568 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6569 SmallVector<Instruction *, 32> Worklist; 6570 SmallPtrSet<Instruction *, 16> Visited; 6571 6572 // Iterate over all the loops and sub-loops to drop SCEV information. 6573 while (!LoopWorklist.empty()) { 6574 auto *CurrL = LoopWorklist.pop_back_val(); 6575 6576 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6577 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6578 6579 // Drop information about predicated SCEV rewrites for this loop. 
6580 for (auto I = PredicatedSCEVRewrites.begin(); 6581 I != PredicatedSCEVRewrites.end();) { 6582 std::pair<const SCEV *, const Loop *> Entry = I->first; 6583 if (Entry.second == CurrL) 6584 PredicatedSCEVRewrites.erase(I++); 6585 else 6586 ++I; 6587 } 6588 6589 auto LoopUsersItr = LoopUsers.find(CurrL); 6590 if (LoopUsersItr != LoopUsers.end()) { 6591 for (auto *S : LoopUsersItr->second) 6592 forgetMemoizedResults(S); 6593 LoopUsers.erase(LoopUsersItr); 6594 } 6595 6596 // Drop information about expressions based on loop-header PHIs. 6597 PushLoopPHIs(CurrL, Worklist); 6598 6599 while (!Worklist.empty()) { 6600 Instruction *I = Worklist.pop_back_val(); 6601 if (!Visited.insert(I).second) 6602 continue; 6603 6604 ValueExprMapType::iterator It = 6605 ValueExprMap.find_as(static_cast<Value *>(I)); 6606 if (It != ValueExprMap.end()) { 6607 eraseValueFromMap(It->first); 6608 forgetMemoizedResults(It->second); 6609 if (PHINode *PN = dyn_cast<PHINode>(I)) 6610 ConstantEvolutionLoopExitValue.erase(PN); 6611 } 6612 6613 PushDefUseChildren(I, Worklist); 6614 } 6615 6616 LoopPropertiesCache.erase(CurrL); 6617 // Forget all contained loops too, to avoid dangling entries in the 6618 // ValuesAtScopes map. 6619 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6620 } 6621 } 6622 6623 void ScalarEvolution::forgetValue(Value *V) { 6624 Instruction *I = dyn_cast<Instruction>(V); 6625 if (!I) return; 6626 6627 // Drop information about expressions based on loop-header PHIs. 6628 SmallVector<Instruction *, 16> Worklist; 6629 Worklist.push_back(I); 6630 6631 SmallPtrSet<Instruction *, 8> Visited; 6632 while (!Worklist.empty()) { 6633 I = Worklist.pop_back_val(); 6634 if (!Visited.insert(I).second) 6635 continue; 6636 6637 ValueExprMapType::iterator It = 6638 ValueExprMap.find_as(static_cast<Value *>(I)); 6639 if (It != ValueExprMap.end()) { 6640 eraseValueFromMap(It->first); 6641 forgetMemoizedResults(It->second); 6642 if (PHINode *PN = dyn_cast<PHINode>(I)) 6643 ConstantEvolutionLoopExitValue.erase(PN); 6644 } 6645 6646 PushDefUseChildren(I, Worklist); 6647 } 6648 } 6649 6650 /// Get the exact loop backedge taken count considering all loop exits. A 6651 /// computable result can only be returned for loops with all exiting blocks 6652 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6653 /// is never skipped. This is a valid assumption as long as the loop exits via 6654 /// that test. For precise results, it is the caller's responsibility to specify 6655 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6656 const SCEV * 6657 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6658 SCEVUnionPredicate *Preds) const { 6659 // If any exits were not computable, the loop is not computable. 6660 if (!isComplete() || ExitNotTaken.empty()) 6661 return SE->getCouldNotCompute(); 6662 6663 const SCEV *BECount = nullptr; 6664 const BasicBlock *Latch = L->getLoopLatch(); 6665 // All exiting blocks we have collected must dominate the only backedge. 6666 if (!Latch) 6667 return SE->getCouldNotCompute(); 6668 6669 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6670 // count is simply a minimum out of all these calculated exit counts. 
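// (Illustrative example, added for exposition: if two dominating exits are
//  taken after n and m iterations respectively, whichever fires first ends
//  the loop, so the fold below yields umin(n, m).)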
6671 for (auto &ENT : ExitNotTaken) { 6672 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6673 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6674 "We should only have known counts for exiting blocks that dominate " 6675 "latch!"); 6676 6677 if (!BECount) 6678 BECount = ENT.ExactNotTaken; 6679 else if (BECount != ENT.ExactNotTaken) 6680 BECount = SE->getUMinFromMismatchedTypes(BECount, ENT.ExactNotTaken); 6681 6682 if (Preds && !ENT.hasAlwaysTruePredicate()) 6683 Preds->add(ENT.Predicate.get()); 6684 6685 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6686 "Predicate should be always true!"); 6687 } 6688 6689 assert(BECount && "Invalid not taken count for loop exit"); 6690 return BECount; 6691 } 6692 6693 /// Get the exact not taken count for this loop exit. 6694 const SCEV * 6695 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6696 ScalarEvolution *SE) const { 6697 for (auto &ENT : ExitNotTaken) 6698 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6699 return ENT.ExactNotTaken; 6700 6701 return SE->getCouldNotCompute(); 6702 } 6703 6704 /// getMax - Get the max backedge taken count for the loop. 6705 const SCEV * 6706 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6707 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6708 return !ENT.hasAlwaysTruePredicate(); 6709 }; 6710 6711 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6712 return SE->getCouldNotCompute(); 6713 6714 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6715 "No point in having a non-constant max backedge taken count!"); 6716 return getMax(); 6717 } 6718 6719 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6720 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6721 return !ENT.hasAlwaysTruePredicate(); 6722 }; 6723 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6724 } 6725 6726 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6727 ScalarEvolution *SE) const { 6728 if (getMax() && getMax() != SE->getCouldNotCompute() && 6729 SE->hasOperand(getMax(), S)) 6730 return true; 6731 6732 for (auto &ENT : ExitNotTaken) 6733 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6734 SE->hasOperand(ENT.ExactNotTaken, S)) 6735 return true; 6736 6737 return false; 6738 } 6739 6740 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6741 : ExactNotTaken(E), MaxNotTaken(E) { 6742 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6743 isa<SCEVConstant>(MaxNotTaken)) && 6744 "No point in having a non-constant max backedge taken count!"); 6745 } 6746 6747 ScalarEvolution::ExitLimit::ExitLimit( 6748 const SCEV *E, const SCEV *M, bool MaxOrZero, 6749 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6750 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6751 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6752 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6753 "Exact is not allowed to be less precise than Max"); 6754 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6755 isa<SCEVConstant>(MaxNotTaken)) && 6756 "No point in having a non-constant max backedge taken count!"); 6757 for (auto *PredSet : PredSetList) 6758 for (auto *P : *PredSet) 6759 addPredicate(P); 6760 } 6761 6762 ScalarEvolution::ExitLimit::ExitLimit( 6763 const SCEV *E, const SCEV *M, bool MaxOrZero, 6764 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6765 : ExitLimit(E, M, MaxOrZero, 
{&PredSet}) { 6766 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6767 isa<SCEVConstant>(MaxNotTaken)) && 6768 "No point in having a non-constant max backedge taken count!"); 6769 } 6770 6771 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6772 bool MaxOrZero) 6773 : ExitLimit(E, M, MaxOrZero, None) { 6774 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6775 isa<SCEVConstant>(MaxNotTaken)) && 6776 "No point in having a non-constant max backedge taken count!"); 6777 } 6778 6779 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6780 /// computable exit into a persistent ExitNotTakenInfo array. 6781 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6782 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6783 &&ExitCounts, 6784 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6785 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6786 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6787 6788 ExitNotTaken.reserve(ExitCounts.size()); 6789 std::transform( 6790 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6791 [&](const EdgeExitInfo &EEI) { 6792 BasicBlock *ExitBB = EEI.first; 6793 const ExitLimit &EL = EEI.second; 6794 if (EL.Predicates.empty()) 6795 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6796 6797 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6798 for (auto *Pred : EL.Predicates) 6799 Predicate->add(Pred); 6800 6801 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6802 }); 6803 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6804 "No point in having a non-constant max backedge taken count!"); 6805 } 6806 6807 /// Invalidate this result and free the ExitNotTakenInfo array. 6808 void ScalarEvolution::BackedgeTakenInfo::clear() { 6809 ExitNotTaken.clear(); 6810 } 6811 6812 /// Compute the number of times the backedge of the specified loop will execute. 6813 ScalarEvolution::BackedgeTakenInfo 6814 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6815 bool AllowPredicates) { 6816 SmallVector<BasicBlock *, 8> ExitingBlocks; 6817 L->getExitingBlocks(ExitingBlocks); 6818 6819 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6820 6821 SmallVector<EdgeExitInfo, 4> ExitCounts; 6822 bool CouldComputeBECount = true; 6823 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6824 const SCEV *MustExitMaxBECount = nullptr; 6825 const SCEV *MayExitMaxBECount = nullptr; 6826 bool MustExitMaxOrZero = false; 6827 6828 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6829 // and compute maxBECount. 6830 // Do a union of all the predicates here. 6831 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6832 BasicBlock *ExitBB = ExitingBlocks[i]; 6833 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6834 6835 assert((AllowPredicates || EL.Predicates.empty()) && 6836 "Predicated exit limit when predicates are not allowed!"); 6837 6838 // 1. For each exit that can be computed, add an entry to ExitCounts. 6839 // CouldComputeBECount is true only if all exits can be computed. 6840 if (EL.ExactNotTaken == getCouldNotCompute()) 6841 // We couldn't compute an exact value for this exit, so 6842 // we won't be able to compute an exact value for the loop. 6843 CouldComputeBECount = false; 6844 else 6845 ExitCounts.emplace_back(ExitBB, EL); 6846 6847 // 2. 
Derive the loop's MaxBECount from each exit's max number of
6848 // non-exiting iterations. Partition the loop exits into two kinds:
6849 // LoopMustExits and LoopMayExits.
6850 //
6851 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
6852 // is a LoopMayExit. If any computable LoopMustExit is found, then
6853 // MaxBECount is the minimum EL.MaxNotTaken of computable
6854 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
6855 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
6856 // computable EL.MaxNotTaken.
6857 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
6858 DT.dominates(ExitBB, Latch)) {
6859 if (!MustExitMaxBECount) {
6860 MustExitMaxBECount = EL.MaxNotTaken;
6861 MustExitMaxOrZero = EL.MaxOrZero;
6862 } else {
6863 MustExitMaxBECount =
6864 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
6865 }
6866 } else if (MayExitMaxBECount != getCouldNotCompute()) {
6867 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
6868 MayExitMaxBECount = EL.MaxNotTaken;
6869 else {
6870 MayExitMaxBECount =
6871 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
6872 }
6873 }
6874 }
6875 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
6876 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
6877 // The loop backedge will be taken the maximum or zero times if there's
6878 // a single exit that must be taken the maximum or zero times.
6879 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
6880 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
6881 MaxBECount, MaxOrZero);
6882 }
6883
6884 ScalarEvolution::ExitLimit
6885 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
6886 bool AllowPredicates) {
6887 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
6888 // If our exiting block does not dominate the latch, then its connection with
6889 // the loop's exit limit may be far from trivial.
6890 const BasicBlock *Latch = L->getLoopLatch();
6891 if (!Latch || !DT.dominates(ExitingBlock, Latch))
6892 return getCouldNotCompute();
6893
6894 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
6895 TerminatorInst *Term = ExitingBlock->getTerminator();
6896 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
6897 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
6898 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
6899 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
6900 "It should have one successor in loop and one exit block!");
6901 // Proceed to the next level to examine the exit condition expression.
6902 return computeExitLimitFromCond(
6903 L, BI->getCondition(), ExitIfTrue,
6904 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
6905 }
6906
6907 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
6908 // For switch, make sure that there is a single exit from the loop.
6909 BasicBlock *Exit = nullptr;
6910 for (auto *SBB : successors(ExitingBlock))
6911 if (!L->contains(SBB)) {
6912 if (Exit) // Multiple exit successors.
6913 return getCouldNotCompute(); 6914 Exit = SBB; 6915 } 6916 assert(Exit && "Exiting block must have at least one exit"); 6917 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6918 /*ControlsExit=*/IsOnlyExit); 6919 } 6920 6921 return getCouldNotCompute(); 6922 } 6923 6924 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6925 const Loop *L, Value *ExitCond, bool ExitIfTrue, 6926 bool ControlsExit, bool AllowPredicates) { 6927 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 6928 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 6929 ControlsExit, AllowPredicates); 6930 } 6931 6932 Optional<ScalarEvolution::ExitLimit> 6933 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6934 bool ExitIfTrue, bool ControlsExit, 6935 bool AllowPredicates) { 6936 (void)this->L; 6937 (void)this->ExitIfTrue; 6938 (void)this->AllowPredicates; 6939 6940 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 6941 this->AllowPredicates == AllowPredicates && 6942 "Variance in assumed invariant key components!"); 6943 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6944 if (Itr == TripCountMap.end()) 6945 return None; 6946 return Itr->second; 6947 } 6948 6949 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6950 bool ExitIfTrue, 6951 bool ControlsExit, 6952 bool AllowPredicates, 6953 const ExitLimit &EL) { 6954 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 6955 this->AllowPredicates == AllowPredicates && 6956 "Variance in assumed invariant key components!"); 6957 6958 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6959 assert(InsertResult.second && "Expected successful insertion!"); 6960 (void)InsertResult; 6961 (void)ExitIfTrue; 6962 } 6963 6964 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6965 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 6966 bool ControlsExit, bool AllowPredicates) { 6967 6968 if (auto MaybeEL = 6969 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 6970 return *MaybeEL; 6971 6972 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 6973 ControlsExit, AllowPredicates); 6974 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 6975 return EL; 6976 } 6977 6978 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6979 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 6980 bool ControlsExit, bool AllowPredicates) { 6981 // Check if the controlling expression for this loop is an And or Or. 6982 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6983 if (BO->getOpcode() == Instruction::And) { 6984 // Recurse on the operands of the and. 6985 bool EitherMayExit = !ExitIfTrue; 6986 ExitLimit EL0 = computeExitLimitFromCondCached( 6987 Cache, L, BO->getOperand(0), ExitIfTrue, 6988 ControlsExit && !EitherMayExit, AllowPredicates); 6989 ExitLimit EL1 = computeExitLimitFromCondCached( 6990 Cache, L, BO->getOperand(1), ExitIfTrue, 6991 ControlsExit && !EitherMayExit, AllowPredicates); 6992 const SCEV *BECount = getCouldNotCompute(); 6993 const SCEV *MaxBECount = getCouldNotCompute(); 6994 if (EitherMayExit) { 6995 // Both conditions must be true for the loop to continue executing. 6996 // Choose the less conservative count. 
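// (Hypothetical example: for a latch condition (i < n) & (i < m) that exits
//  when the conjunction turns false, either operand going false ends the
//  loop, so the exact count computed below is the umin of the two operand
//  counts.)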
6997 if (EL0.ExactNotTaken == getCouldNotCompute() || 6998 EL1.ExactNotTaken == getCouldNotCompute()) 6999 BECount = getCouldNotCompute(); 7000 else 7001 BECount = 7002 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7003 if (EL0.MaxNotTaken == getCouldNotCompute()) 7004 MaxBECount = EL1.MaxNotTaken; 7005 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7006 MaxBECount = EL0.MaxNotTaken; 7007 else 7008 MaxBECount = 7009 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7010 } else { 7011 // Both conditions must be true at the same time for the loop to exit. 7012 // For now, be conservative. 7013 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7014 MaxBECount = EL0.MaxNotTaken; 7015 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7016 BECount = EL0.ExactNotTaken; 7017 } 7018 7019 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7020 // to be more aggressive when computing BECount than when computing 7021 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7022 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7023 // to not. 7024 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7025 !isa<SCEVCouldNotCompute>(BECount)) 7026 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7027 7028 return ExitLimit(BECount, MaxBECount, false, 7029 {&EL0.Predicates, &EL1.Predicates}); 7030 } 7031 if (BO->getOpcode() == Instruction::Or) { 7032 // Recurse on the operands of the or. 7033 bool EitherMayExit = ExitIfTrue; 7034 ExitLimit EL0 = computeExitLimitFromCondCached( 7035 Cache, L, BO->getOperand(0), ExitIfTrue, 7036 ControlsExit && !EitherMayExit, AllowPredicates); 7037 ExitLimit EL1 = computeExitLimitFromCondCached( 7038 Cache, L, BO->getOperand(1), ExitIfTrue, 7039 ControlsExit && !EitherMayExit, AllowPredicates); 7040 const SCEV *BECount = getCouldNotCompute(); 7041 const SCEV *MaxBECount = getCouldNotCompute(); 7042 if (EitherMayExit) { 7043 // Both conditions must be false for the loop to continue executing. 7044 // Choose the less conservative count. 7045 if (EL0.ExactNotTaken == getCouldNotCompute() || 7046 EL1.ExactNotTaken == getCouldNotCompute()) 7047 BECount = getCouldNotCompute(); 7048 else 7049 BECount = 7050 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7051 if (EL0.MaxNotTaken == getCouldNotCompute()) 7052 MaxBECount = EL1.MaxNotTaken; 7053 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7054 MaxBECount = EL0.MaxNotTaken; 7055 else 7056 MaxBECount = 7057 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7058 } else { 7059 // Both conditions must be false at the same time for the loop to exit. 7060 // For now, be conservative. 7061 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7062 MaxBECount = EL0.MaxNotTaken; 7063 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7064 BECount = EL0.ExactNotTaken; 7065 } 7066 7067 return ExitLimit(BECount, MaxBECount, false, 7068 {&EL0.Predicates, &EL1.Predicates}); 7069 } 7070 } 7071 7072 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7073 // Proceed to the next level to examine the icmp. 7074 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7075 ExitLimit EL = 7076 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7077 if (EL.hasFullInfo() || !AllowPredicates) 7078 return EL; 7079 7080 // Try again, but use SCEV predicates this time. 
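// (Expository note: the predicated retry may return a count that is valid
//  only under recorded SCEV predicates -- for instance, a no-wrap assumption
//  on the induction variable -- which callers must check or enforce rather
//  than assume.)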
7081 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7082 /*AllowPredicates=*/true); 7083 } 7084 7085 // Check for a constant condition. These are normally stripped out by 7086 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7087 // preserve the CFG and is temporarily leaving constant conditions 7088 // in place. 7089 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7090 if (ExitIfTrue == !CI->getZExtValue()) 7091 // The backedge is always taken. 7092 return getCouldNotCompute(); 7093 else 7094 // The backedge is never taken. 7095 return getZero(CI->getType()); 7096 } 7097 7098 // If it's not an integer or pointer comparison then compute it the hard way. 7099 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7100 } 7101 7102 ScalarEvolution::ExitLimit 7103 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7104 ICmpInst *ExitCond, 7105 bool ExitIfTrue, 7106 bool ControlsExit, 7107 bool AllowPredicates) { 7108 // If the condition was exit on true, convert the condition to exit on false 7109 ICmpInst::Predicate Pred; 7110 if (!ExitIfTrue) 7111 Pred = ExitCond->getPredicate(); 7112 else 7113 Pred = ExitCond->getInversePredicate(); 7114 const ICmpInst::Predicate OriginalPred = Pred; 7115 7116 // Handle common loops like: for (X = "string"; *X; ++X) 7117 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7118 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7119 ExitLimit ItCnt = 7120 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7121 if (ItCnt.hasAnyInfo()) 7122 return ItCnt; 7123 } 7124 7125 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7126 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7127 7128 // Try to evaluate any dependencies out of the loop. 7129 LHS = getSCEVAtScope(LHS, L); 7130 RHS = getSCEVAtScope(RHS, L); 7131 7132 // At this point, we would like to compute how many iterations of the 7133 // loop the predicate will return true for these inputs. 7134 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7135 // If there is a loop-invariant, force it into the RHS. 7136 std::swap(LHS, RHS); 7137 Pred = ICmpInst::getSwappedPredicate(Pred); 7138 } 7139 7140 // Simplify the operands before analyzing them. 7141 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7142 7143 // If we have a comparison of a chrec against a constant, try to use value 7144 // ranges to answer this query. 7145 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7146 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7147 if (AddRec->getLoop() == L) { 7148 // Form the constant range. 
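// (Worked example, added for exposition: for an AddRec {0,+,1} tested with
//  "slt 10", makeExactICmpRegion(SLT, 10) yields the range [INT_MIN, 10),
//  and getNumIterationsInRange then counts the iterations for which the
//  recurrence stays inside that range.)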
7149 ConstantRange CompRange = 7150 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7151 7152 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7153 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7154 } 7155 7156 switch (Pred) { 7157 case ICmpInst::ICMP_NE: { // while (X != Y) 7158 // Convert to: while (X-Y != 0) 7159 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7160 AllowPredicates); 7161 if (EL.hasAnyInfo()) return EL; 7162 break; 7163 } 7164 case ICmpInst::ICMP_EQ: { // while (X == Y) 7165 // Convert to: while (X-Y == 0) 7166 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7167 if (EL.hasAnyInfo()) return EL; 7168 break; 7169 } 7170 case ICmpInst::ICMP_SLT: 7171 case ICmpInst::ICMP_ULT: { // while (X < Y) 7172 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7173 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7174 AllowPredicates); 7175 if (EL.hasAnyInfo()) return EL; 7176 break; 7177 } 7178 case ICmpInst::ICMP_SGT: 7179 case ICmpInst::ICMP_UGT: { // while (X > Y) 7180 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7181 ExitLimit EL = 7182 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7183 AllowPredicates); 7184 if (EL.hasAnyInfo()) return EL; 7185 break; 7186 } 7187 default: 7188 break; 7189 } 7190 7191 auto *ExhaustiveCount = 7192 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7193 7194 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7195 return ExhaustiveCount; 7196 7197 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7198 ExitCond->getOperand(1), L, OriginalPred); 7199 } 7200 7201 ScalarEvolution::ExitLimit 7202 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7203 SwitchInst *Switch, 7204 BasicBlock *ExitingBlock, 7205 bool ControlsExit) { 7206 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7207 7208 // Give up if the exit is the default dest of a switch. 7209 if (Switch->getDefaultDest() == ExitingBlock) 7210 return getCouldNotCompute(); 7211 7212 assert(L->contains(Switch->getDefaultDest()) && 7213 "Default case must not exit the loop!"); 7214 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7215 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7216 7217 // while (X != Y) --> while (X-Y != 0) 7218 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7219 if (EL.hasAnyInfo()) 7220 return EL; 7221 7222 return getCouldNotCompute(); 7223 } 7224 7225 static ConstantInt * 7226 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7227 ScalarEvolution &SE) { 7228 const SCEV *InVal = SE.getConstant(C); 7229 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7230 assert(isa<SCEVConstant>(Val) && 7231 "Evaluation of SCEV at constant didn't fold correctly?"); 7232 return cast<SCEVConstant>(Val)->getValue(); 7233 } 7234 7235 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7236 /// compute the backedge execution count. 7237 ScalarEvolution::ExitLimit 7238 ScalarEvolution::computeLoadConstantCompareExitLimit( 7239 LoadInst *LI, 7240 Constant *RHS, 7241 const Loop *L, 7242 ICmpInst::Predicate predicate) { 7243 if (LI->isVolatile()) return getCouldNotCompute(); 7244 7245 // Check to see if the loaded pointer is a getelementptr of a global. 7246 // TODO: Use SCEV instead of manually grubbing with GEPs. 
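// (Hypothetical shape this is meant to match, for exposition:
//
//    @GV = internal constant [6 x i8] c"hello\00"
//    %p  = getelementptr [6 x i8], [6 x i8]* @GV, i32 0, i32 %iv
//    %c  = load i8, i8* %p
//
//  with %c compared against a constant in the exit test.)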
7247 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7248 if (!GEP) return getCouldNotCompute();
7249
7250 // Make sure that it is really a constant global we are gepping, with an
7251 // initializer, and make sure the first IDX is really 0.
7252 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7253 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7254 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7255 !cast<Constant>(GEP->getOperand(1))->isNullValue())
7256 return getCouldNotCompute();
7257
7258 // Okay, we allow one non-constant index into the GEP instruction.
7259 Value *VarIdx = nullptr;
7260 std::vector<Constant*> Indexes;
7261 unsigned VarIdxNum = 0;
7262 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7263 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7264 Indexes.push_back(CI);
7265 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7266 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7267 VarIdx = GEP->getOperand(i);
7268 VarIdxNum = i-2;
7269 Indexes.push_back(nullptr);
7270 }
7271
7272 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7273 if (!VarIdx)
7274 return getCouldNotCompute();
7275
7276 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7277 // Check to see if X is a loop-variant value now.
7278 const SCEV *Idx = getSCEV(VarIdx);
7279 Idx = getSCEVAtScope(Idx, L);
7280
7281 // We can only recognize very limited forms of loop index expressions, in
7282 // particular, only affine AddRec's like {C1,+,C2}.
7283 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7284 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7285 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7286 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7287 return getCouldNotCompute();
7288
7289 unsigned MaxSteps = MaxBruteForceIterations;
7290 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7291 ConstantInt *ItCst = ConstantInt::get(
7292 cast<IntegerType>(IdxExpr->getType()), IterationNum);
7293 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7294
7295 // Form the GEP offset.
7296 Indexes[VarIdxNum] = Val;
7297
7298 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7299 Indexes);
7300 if (!Result) break; // Cannot compute!
7301
7302 // Evaluate the condition for this iteration.
7303 Result = ConstantExpr::getICmp(predicate, Result, RHS);
7304 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7305 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7306 ++NumArrayLenItCounts;
7307 return getConstant(ItCst); // Found terminating iteration!
7308 }
7309 }
7310 return getCouldNotCompute();
7311 }
7312
7313 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7314 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7315 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7316 if (!RHS)
7317 return getCouldNotCompute();
7318
7319 const BasicBlock *Latch = L->getLoopLatch();
7320 if (!Latch)
7321 return getCouldNotCompute();
7322
7323 const BasicBlock *Predecessor = L->getLoopPredecessor();
7324 if (!Predecessor)
7325 return getCouldNotCompute();
7326
7327 // Return true if V is of the form "LHS `shift_op` <positive constant>".
7328 // Return LHS in OutLHS and shift_op in OutOpCode.
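// (Expository example: "%s = lshr i32 %x, 3" matches with OutLHS = %x and
//  OutOpCode = LShr; a shift amount of zero fails the strict positivity
//  check below.)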
7329 auto MatchPositiveShift =
7330 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7331
7332 using namespace PatternMatch;
7333
7334 ConstantInt *ShiftAmt;
7335 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7336 OutOpCode = Instruction::LShr;
7337 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7338 OutOpCode = Instruction::AShr;
7339 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7340 OutOpCode = Instruction::Shl;
7341 else
7342 return false;
7343
7344 return ShiftAmt->getValue().isStrictlyPositive();
7345 };
7346
7347 // Recognize a "shift recurrence", either of the form %iv or of %iv.shifted,
7348 // in
7349 // loop:
7350 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7351 // %iv.shifted = lshr i32 %iv, <positive constant>
7352 //
7353 // Return true on a successful match. Return the corresponding PHI node (%iv
7354 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7355 auto MatchShiftRecurrence =
7356 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7357 Optional<Instruction::BinaryOps> PostShiftOpCode;
7358
7359 {
7360 Instruction::BinaryOps OpC;
7361 Value *V;
7362
7363 // If we encounter a shift instruction, "peel off" the shift operation,
7364 // and remember that we did so. Later when we inspect %iv's backedge
7365 // value, we will make sure that the backedge value uses the same
7366 // operation.
7367 //
7368 // Note: the peeled shift operation does not have to be the same
7369 // instruction as the one feeding into the PHI's backedge value. We only
7370 // really care about it being the same *kind* of shift instruction --
7371 // that's all that is required for our later inferences to hold.
7372 if (MatchPositiveShift(LHS, V, OpC)) {
7373 PostShiftOpCode = OpC;
7374 LHS = V;
7375 }
7376 }
7377
7378 PNOut = dyn_cast<PHINode>(LHS);
7379 if (!PNOut || PNOut->getParent() != L->getHeader())
7380 return false;
7381
7382 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7383 Value *OpLHS;
7384
7385 return
7386 // The backedge value for the PHI node must be a shift by a positive
7387 // amount
7388 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7389
7390 // of the PHI node itself
7391 OpLHS == PNOut &&
7392
7393 // and the kind of shift should match the kind of shift we peeled
7394 // off, if any.
7395 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7396 };
7397
7398 PHINode *PN;
7399 Instruction::BinaryOps OpCode;
7400 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7401 return getCouldNotCompute();
7402
7403 const DataLayout &DL = getDataLayout();
7404
7405 // The key rationale for this optimization is that for some kinds of shift
7406 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7407 // within a finite number of iterations. If the condition guarding the
7408 // backedge (in the sense that the backedge is taken if the condition is true)
7409 // is false for the value the shift recurrence stabilizes to, then we know
7410 // that the backedge is taken only a finite number of times.
7411
7412 ConstantInt *StableValue = nullptr;
7413 switch (OpCode) {
7414 default:
7415 llvm_unreachable("Impossible case!");
7416
7417 case Instruction::AShr: {
7418 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7419 // bitwidth(K) iterations.
7420 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7421 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7422 Predecessor->getTerminator(), &DT); 7423 auto *Ty = cast<IntegerType>(RHS->getType()); 7424 if (Known.isNonNegative()) 7425 StableValue = ConstantInt::get(Ty, 0); 7426 else if (Known.isNegative()) 7427 StableValue = ConstantInt::get(Ty, -1, true); 7428 else 7429 return getCouldNotCompute(); 7430 7431 break; 7432 } 7433 case Instruction::LShr: 7434 case Instruction::Shl: 7435 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7436 // stabilize to 0 in at most bitwidth(K) iterations. 7437 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7438 break; 7439 } 7440 7441 auto *Result = 7442 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7443 assert(Result->getType()->isIntegerTy(1) && 7444 "Otherwise cannot be an operand to a branch instruction"); 7445 7446 if (Result->isZeroValue()) { 7447 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7448 const SCEV *UpperBound = 7449 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7450 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7451 } 7452 7453 return getCouldNotCompute(); 7454 } 7455 7456 /// Return true if we can constant fold an instruction of the specified type, 7457 /// assuming that all operands were constants. 7458 static bool CanConstantFold(const Instruction *I) { 7459 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7460 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7461 isa<LoadInst>(I)) 7462 return true; 7463 7464 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7465 if (const Function *F = CI->getCalledFunction()) 7466 return canConstantFoldCallTo(CI, F); 7467 return false; 7468 } 7469 7470 /// Determine whether this instruction can constant evolve within this loop 7471 /// assuming its operands can all constant evolve. 7472 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7473 // An instruction outside of the loop can't be derived from a loop PHI. 7474 if (!L->contains(I)) return false; 7475 7476 if (isa<PHINode>(I)) { 7477 // We don't currently keep track of the control flow needed to evaluate 7478 // PHIs, so we cannot handle PHIs inside of loops. 7479 return L->getHeader() == I->getParent(); 7480 } 7481 7482 // If we won't be able to constant fold this expression even if the operands 7483 // are constants, bail early. 7484 return CanConstantFold(I); 7485 } 7486 7487 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7488 /// recursing through each instruction operand until reaching a loop header phi. 7489 static PHINode * 7490 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7491 DenseMap<Instruction *, PHINode *> &PHIMap, 7492 unsigned Depth) { 7493 if (Depth > MaxConstantEvolvingDepth) 7494 return nullptr; 7495 7496 // Otherwise, we can evaluate this instruction if all of its operands are 7497 // constant or derived from a PHI node themselves. 7498 PHINode *PHI = nullptr; 7499 for (Value *Op : UseInst->operands()) { 7500 if (isa<Constant>(Op)) continue; 7501 7502 Instruction *OpInst = dyn_cast<Instruction>(Op); 7503 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7504 7505 PHINode *P = dyn_cast<PHINode>(OpInst); 7506 if (!P) 7507 // If this operand is already visited, reuse the prior result. 7508 // We may have P != PHI if this is the deepest point at which the 7509 // inconsistent paths meet. 
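// For example (illustrative), in "%r = add (mul i32 %phi, 2), (mul i32 %phi, 3)"
// both multiplies reach the same header PHI %phi, so the memoized result is
// reused for the second operand; had the two operands reached two different
// header PHIs, the walk below would return nullptr.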
7510 P = PHIMap.lookup(OpInst);
7511 if (!P) {
7512 // Recurse and memoize the results, whether a phi is found or not.
7513 // This recursive call invalidates pointers into PHIMap.
7514 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7515 PHIMap[OpInst] = P;
7516 }
7517 if (!P)
7518 return nullptr; // Not evolving from PHI
7519 if (PHI && PHI != P)
7520 return nullptr; // Evolving from multiple different PHIs.
7521 PHI = P;
7522 }
7523 // This is an expression evolving from a constant PHI!
7524 return PHI;
7525 }
7526
7527 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7528 /// in the loop that V is derived from. We allow arbitrary operations along the
7529 /// way, but the operands of an operation must either be constants or a value
7530 /// derived from a constant PHI. If this expression does not fit with these
7531 /// constraints, return null.
7532 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7533 Instruction *I = dyn_cast<Instruction>(V);
7534 if (!I || !canConstantEvolve(I, L)) return nullptr;
7535
7536 if (PHINode *PN = dyn_cast<PHINode>(I))
7537 return PN;
7538
7539 // Record non-constant instructions contained by the loop.
7540 DenseMap<Instruction *, PHINode *> PHIMap;
7541 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7542 }
7543
7544 /// EvaluateExpression - Given an expression that passes the
7545 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI nodes
7546 /// in the loop have the constant values given in Vals. If we can't fold this
7547 /// expression for some reason, return null.
7548 static Constant *EvaluateExpression(Value *V, const Loop *L,
7549 DenseMap<Instruction *, Constant *> &Vals,
7550 const DataLayout &DL,
7551 const TargetLibraryInfo *TLI) {
7552 // Convenient constant check, but redundant for recursive calls.
7553 if (Constant *C = dyn_cast<Constant>(V)) return C;
7554 Instruction *I = dyn_cast<Instruction>(V);
7555 if (!I) return nullptr;
7556
7557 if (Constant *C = Vals.lookup(I)) return C;
7558
7559 // An instruction inside the loop depends on a value outside the loop that we
7560 // weren't given a mapping for, or a value such as a call inside the loop.
7561 if (!canConstantEvolve(I, L)) return nullptr;
7562
7563 // An unmapped PHI can be due to a branch or another loop inside this loop,
7564 // or due to this not being the initial iteration through a loop where we
7565 // couldn't compute the evolution of this particular PHI last time.
7566 if (isa<PHINode>(I)) return nullptr;
7567
7568 std::vector<Constant*> Operands(I->getNumOperands());
7569
7570 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
7571 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
7572 if (!Operand) {
7573 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
7574 if (!Operands[i]) return nullptr;
7575 continue;
7576 }
7577 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
7578 Vals[Operand] = C;
7579 if (!C) return nullptr;
7580 Operands[i] = C;
7581 }
7582
7583 if (CmpInst *CI = dyn_cast<CmpInst>(I))
7584 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
7585 Operands[1], DL, TLI);
7586 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7587 if (!LI->isVolatile())
7588 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
7589 }
7590 return ConstantFoldInstOperands(I, Operands, DL, TLI);
7591 }
7592
7593
7594 // If every incoming value to PN except the one for BB is a specific Constant,
7595 // return that, else return nullptr.
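// For example, for "%pn = phi i32 [ 5, %pre ], [ %x, %latch ]" with BB set to
// %latch this returns the ConstantInt 5; if the %pre value were non-constant,
// or if two non-BB edges carried different constants, it returns nullptr.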
7596 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7597 Constant *IncomingVal = nullptr; 7598 7599 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7600 if (PN->getIncomingBlock(i) == BB) 7601 continue; 7602 7603 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7604 if (!CurrentVal) 7605 return nullptr; 7606 7607 if (IncomingVal != CurrentVal) { 7608 if (IncomingVal) 7609 return nullptr; 7610 IncomingVal = CurrentVal; 7611 } 7612 } 7613 7614 return IncomingVal; 7615 } 7616 7617 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7618 /// in the header of its containing loop, we know the loop executes a 7619 /// constant number of times, and the PHI node is just a recurrence 7620 /// involving constants, fold it. 7621 Constant * 7622 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7623 const APInt &BEs, 7624 const Loop *L) { 7625 auto I = ConstantEvolutionLoopExitValue.find(PN); 7626 if (I != ConstantEvolutionLoopExitValue.end()) 7627 return I->second; 7628 7629 if (BEs.ugt(MaxBruteForceIterations)) 7630 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7631 7632 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7633 7634 DenseMap<Instruction *, Constant *> CurrentIterVals; 7635 BasicBlock *Header = L->getHeader(); 7636 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7637 7638 BasicBlock *Latch = L->getLoopLatch(); 7639 if (!Latch) 7640 return nullptr; 7641 7642 for (PHINode &PHI : Header->phis()) { 7643 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7644 CurrentIterVals[&PHI] = StartCST; 7645 } 7646 if (!CurrentIterVals.count(PN)) 7647 return RetVal = nullptr; 7648 7649 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7650 7651 // Execute the loop symbolically to determine the exit value. 7652 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7653 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7654 7655 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7656 unsigned IterationNum = 0; 7657 const DataLayout &DL = getDataLayout(); 7658 for (; ; ++IterationNum) { 7659 if (IterationNum == NumIterations) 7660 return RetVal = CurrentIterVals[PN]; // Got exit value! 7661 7662 // Compute the value of the PHIs for the next iteration. 7663 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7664 DenseMap<Instruction *, Constant *> NextIterVals; 7665 Constant *NextPHI = 7666 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7667 if (!NextPHI) 7668 return nullptr; // Couldn't evaluate! 7669 NextIterVals[PN] = NextPHI; 7670 7671 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7672 7673 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7674 // cease to be able to evaluate one of them or if they stop evolving, 7675 // because that doesn't necessarily prevent us from computing PN. 7676 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7677 for (const auto &I : CurrentIterVals) { 7678 PHINode *PHI = dyn_cast<PHINode>(I.first); 7679 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7680 PHIsToCompute.emplace_back(PHI, I.second); 7681 } 7682 // We use two distinct loops because EvaluateExpression may invalidate any 7683 // iterators into CurrentIterVals. 
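// (EvaluateExpression inserts the values of non-PHI instructions into the
// map, which can force a DenseMap rehash; the PHIsToCompute snapshot built
// above is what keeps the loop below safe.)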
7684 for (const auto &I : PHIsToCompute) {
7685 PHINode *PHI = I.first;
7686 Constant *&NextPHI = NextIterVals[PHI];
7687 if (!NextPHI) { // Not already computed.
7688 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7689 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7690 }
7691 if (NextPHI != I.second)
7692 StoppedEvolving = false;
7693 }
7694
7695 // If all entries in CurrentIterVals == NextIterVals then we can stop
7696 // iterating, the loop can't continue to change.
7697 if (StoppedEvolving)
7698 return RetVal = CurrentIterVals[PN];
7699
7700 CurrentIterVals.swap(NextIterVals);
7701 }
7702 }
7703
7704 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
7705 Value *Cond,
7706 bool ExitWhen) {
7707 PHINode *PN = getConstantEvolvingPHI(Cond, L);
7708 if (!PN) return getCouldNotCompute();
7709
7710 // If the loop is canonicalized, the PHI will have exactly two entries.
7711 // That's the only form we support here.
7712 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
7713
7714 DenseMap<Instruction *, Constant *> CurrentIterVals;
7715 BasicBlock *Header = L->getHeader();
7716 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
7717
7718 BasicBlock *Latch = L->getLoopLatch();
7719 assert(Latch && "Should follow from NumIncomingValues == 2!");
7720
7721 for (PHINode &PHI : Header->phis()) {
7722 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
7723 CurrentIterVals[&PHI] = StartCST;
7724 }
7725 if (!CurrentIterVals.count(PN))
7726 return getCouldNotCompute();
7727
7728 // Okay, we found a PHI node that defines the trip count of this loop. Execute
7729 // the loop symbolically to determine when the condition gets a value of
7730 // "ExitWhen".
7731 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
7732 const DataLayout &DL = getDataLayout();
7733 for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
7734 auto *CondVal = dyn_cast_or_null<ConstantInt>(
7735 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
7736
7737 // Couldn't symbolically evaluate.
7738 if (!CondVal) return getCouldNotCompute();
7739
7740 if (CondVal->getValue() == uint64_t(ExitWhen)) {
7741 ++NumBruteForceTripCountsComputed;
7742 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
7743 }
7744
7745 // Update all the PHI nodes for the next iteration.
7746 DenseMap<Instruction *, Constant *> NextIterVals;
7747
7748 // Create a list of which PHIs we need to compute. We want to do this before
7749 // calling EvaluateExpression on them because that may invalidate iterators
7750 // into CurrentIterVals.
7751 SmallVector<PHINode *, 8> PHIsToCompute;
7752 for (const auto &I : CurrentIterVals) {
7753 PHINode *PHI = dyn_cast<PHINode>(I.first);
7754 if (!PHI || PHI->getParent() != Header) continue;
7755 PHIsToCompute.push_back(PHI);
7756 }
7757 for (PHINode *PHI : PHIsToCompute) {
7758 Constant *&NextPHI = NextIterVals[PHI];
7759 if (NextPHI) continue; // Already computed!
7760
7761 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7762 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7763 }
7764 CurrentIterVals.swap(NextIterVals);
7765 }
7766
7767 // Too many iterations were needed to evaluate.
7768 return getCouldNotCompute(); 7769 } 7770 7771 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7772 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7773 ValuesAtScopes[V]; 7774 // Check to see if we've folded this expression at this loop before. 7775 for (auto &LS : Values) 7776 if (LS.first == L) 7777 return LS.second ? LS.second : V; 7778 7779 Values.emplace_back(L, nullptr); 7780 7781 // Otherwise compute it. 7782 const SCEV *C = computeSCEVAtScope(V, L); 7783 for (auto &LS : reverse(ValuesAtScopes[V])) 7784 if (LS.first == L) { 7785 LS.second = C; 7786 break; 7787 } 7788 return C; 7789 } 7790 7791 /// This builds up a Constant using the ConstantExpr interface. That way, we 7792 /// will return Constants for objects which aren't represented by a 7793 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7794 /// Returns NULL if the SCEV isn't representable as a Constant. 7795 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7796 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7797 case scCouldNotCompute: 7798 case scAddRecExpr: 7799 break; 7800 case scConstant: 7801 return cast<SCEVConstant>(V)->getValue(); 7802 case scUnknown: 7803 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7804 case scSignExtend: { 7805 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7806 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7807 return ConstantExpr::getSExt(CastOp, SS->getType()); 7808 break; 7809 } 7810 case scZeroExtend: { 7811 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7812 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7813 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7814 break; 7815 } 7816 case scTruncate: { 7817 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7818 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7819 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7820 break; 7821 } 7822 case scAddExpr: { 7823 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7824 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7825 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7826 unsigned AS = PTy->getAddressSpace(); 7827 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7828 C = ConstantExpr::getBitCast(C, DestPtrTy); 7829 } 7830 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7831 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7832 if (!C2) return nullptr; 7833 7834 // First pointer! 7835 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7836 unsigned AS = C2->getType()->getPointerAddressSpace(); 7837 std::swap(C, C2); 7838 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7839 // The offsets have been converted to bytes. We can add bytes to an 7840 // i8* by GEP with the byte count in the first index. 7841 C = ConstantExpr::getBitCast(C, DestPtrTy); 7842 } 7843 7844 // Don't bother trying to sum two pointers. We probably can't 7845 // statically compute a load that results from it anyway. 
7846 if (C2->getType()->isPointerTy())
7847 return nullptr;
7848
7849 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
7850 if (PTy->getElementType()->isStructTy())
7851 C2 = ConstantExpr::getIntegerCast(
7852 C2, Type::getInt32Ty(C->getContext()), true);
7853 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
7854 } else
7855 C = ConstantExpr::getAdd(C, C2);
7856 }
7857 return C;
7858 }
7859 break;
7860 }
7861 case scMulExpr: {
7862 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
7863 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
7864 // Don't bother with pointers at all.
7865 if (C->getType()->isPointerTy()) return nullptr;
7866 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
7867 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
7868 if (!C2 || C2->getType()->isPointerTy()) return nullptr;
7869 C = ConstantExpr::getMul(C, C2);
7870 }
7871 return C;
7872 }
7873 break;
7874 }
7875 case scUDivExpr: {
7876 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
7877 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
7878 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
7879 if (LHS->getType() == RHS->getType())
7880 return ConstantExpr::getUDiv(LHS, RHS);
7881 break;
7882 }
7883 case scSMaxExpr:
7884 case scUMaxExpr:
7885 break; // TODO: smax, umax.
7886 }
7887 return nullptr;
7888 }
7889
7890 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
7891 if (isa<SCEVConstant>(V)) return V;
7892
7893 // If this instruction is evolved from a constant-evolving PHI, compute the
7894 // exit value from the loop without using SCEVs.
7895 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
7896 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
7897 const Loop *LI = this->LI[I->getParent()];
7898 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
7899 if (PHINode *PN = dyn_cast<PHINode>(I))
7900 if (PN->getParent() == LI->getHeader()) {
7901 // Okay, there is no closed form solution for the PHI node. Check
7902 // to see if the loop that contains it has a known backedge-taken
7903 // count. If so, we may be able to force computation of the exit
7904 // value.
7905 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
7906 if (const SCEVConstant *BTCC =
7907 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
7908
7909 // This trivial case can show up in some degenerate cases where
7910 // the incoming IR has not yet been fully simplified.
7911 if (BTCC->getValue()->isZero()) {
7912 Value *InitValue = nullptr;
7913 bool MultipleInitValues = false;
7914 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
7915 if (!LI->contains(PN->getIncomingBlock(i))) {
7916 if (!InitValue)
7917 InitValue = PN->getIncomingValue(i);
7918 else if (InitValue != PN->getIncomingValue(i)) {
7919 MultipleInitValues = true;
7920 break;
7921 }
7922 }
7923 }
7924 if (!MultipleInitValues && InitValue)
7925 return getSCEV(InitValue);
7926 }
7927 // Okay, we know how many times the containing loop executes. If
7928 // this is a constant evolving PHI node, get the final value at
7929 // the specified iteration number.
7930 Constant *RV =
7931 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
7932 if (RV) return getSCEV(RV);
7933 }
7934 }
7935
7936 // Okay, this is an expression that we cannot symbolically evaluate
7937 // into a SCEV. Check to see if it's possible to symbolically evaluate
7938 // the arguments into constants, and if so, try to constant propagate the
7939 // result.
This is particularly useful for computing loop exit values. 7940 if (CanConstantFold(I)) { 7941 SmallVector<Constant *, 4> Operands; 7942 bool MadeImprovement = false; 7943 for (Value *Op : I->operands()) { 7944 if (Constant *C = dyn_cast<Constant>(Op)) { 7945 Operands.push_back(C); 7946 continue; 7947 } 7948 7949 // If any of the operands is non-constant and if they are 7950 // non-integer and non-pointer, don't even try to analyze them 7951 // with scev techniques. 7952 if (!isSCEVable(Op->getType())) 7953 return V; 7954 7955 const SCEV *OrigV = getSCEV(Op); 7956 const SCEV *OpV = getSCEVAtScope(OrigV, L); 7957 MadeImprovement |= OrigV != OpV; 7958 7959 Constant *C = BuildConstantFromSCEV(OpV); 7960 if (!C) return V; 7961 if (C->getType() != Op->getType()) 7962 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 7963 Op->getType(), 7964 false), 7965 C, Op->getType()); 7966 Operands.push_back(C); 7967 } 7968 7969 // Check to see if getSCEVAtScope actually made an improvement. 7970 if (MadeImprovement) { 7971 Constant *C = nullptr; 7972 const DataLayout &DL = getDataLayout(); 7973 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 7974 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7975 Operands[1], DL, &TLI); 7976 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 7977 if (!LI->isVolatile()) 7978 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7979 } else 7980 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 7981 if (!C) return V; 7982 return getSCEV(C); 7983 } 7984 } 7985 } 7986 7987 // This is some other type of SCEVUnknown, just return it. 7988 return V; 7989 } 7990 7991 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 7992 // Avoid performing the look-up in the common case where the specified 7993 // expression has no loop-variant portions. 7994 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 7995 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7996 if (OpAtScope != Comm->getOperand(i)) { 7997 // Okay, at least one of these operands is loop variant but might be 7998 // foldable. Build a new instance of the folded commutative expression. 7999 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8000 Comm->op_begin()+i); 8001 NewOps.push_back(OpAtScope); 8002 8003 for (++i; i != e; ++i) { 8004 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8005 NewOps.push_back(OpAtScope); 8006 } 8007 if (isa<SCEVAddExpr>(Comm)) 8008 return getAddExpr(NewOps); 8009 if (isa<SCEVMulExpr>(Comm)) 8010 return getMulExpr(NewOps); 8011 if (isa<SCEVSMaxExpr>(Comm)) 8012 return getSMaxExpr(NewOps); 8013 if (isa<SCEVUMaxExpr>(Comm)) 8014 return getUMaxExpr(NewOps); 8015 llvm_unreachable("Unknown commutative SCEV type!"); 8016 } 8017 } 8018 // If we got here, all operands are loop invariant. 8019 return Comm; 8020 } 8021 8022 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8023 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8024 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8025 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8026 return Div; // must be loop invariant 8027 return getUDivExpr(LHS, RHS); 8028 } 8029 8030 // If this is a loop recurrence for a loop that does not contain L, then we 8031 // are dealing with the final value computed by the loop. 8032 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8033 // First, attempt to evaluate each operand. 
8034 // Avoid performing the look-up in the common case where the specified 8035 // expression has no loop-variant portions. 8036 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8037 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8038 if (OpAtScope == AddRec->getOperand(i)) 8039 continue; 8040 8041 // Okay, at least one of these operands is loop variant but might be 8042 // foldable. Build a new instance of the folded commutative expression. 8043 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8044 AddRec->op_begin()+i); 8045 NewOps.push_back(OpAtScope); 8046 for (++i; i != e; ++i) 8047 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8048 8049 const SCEV *FoldedRec = 8050 getAddRecExpr(NewOps, AddRec->getLoop(), 8051 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8052 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8053 // The addrec may be folded to a nonrecurrence, for example, if the 8054 // induction variable is multiplied by zero after constant folding. Go 8055 // ahead and return the folded value. 8056 if (!AddRec) 8057 return FoldedRec; 8058 break; 8059 } 8060 8061 // If the scope is outside the addrec's loop, evaluate it by using the 8062 // loop exit value of the addrec. 8063 if (!AddRec->getLoop()->contains(L)) { 8064 // To evaluate this recurrence, we need to know how many times the AddRec 8065 // loop iterates. Compute this now. 8066 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8067 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8068 8069 // Then, evaluate the AddRec. 8070 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8071 } 8072 8073 return AddRec; 8074 } 8075 8076 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8077 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8078 if (Op == Cast->getOperand()) 8079 return Cast; // must be loop invariant 8080 return getZeroExtendExpr(Op, Cast->getType()); 8081 } 8082 8083 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8084 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8085 if (Op == Cast->getOperand()) 8086 return Cast; // must be loop invariant 8087 return getSignExtendExpr(Op, Cast->getType()); 8088 } 8089 8090 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8091 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8092 if (Op == Cast->getOperand()) 8093 return Cast; // must be loop invariant 8094 return getTruncateExpr(Op, Cast->getType()); 8095 } 8096 8097 llvm_unreachable("Unknown SCEV type!"); 8098 } 8099 8100 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8101 return getSCEVAtScope(getSCEV(V), L); 8102 } 8103 8104 /// Finds the minimum unsigned root of the following equation: 8105 /// 8106 /// A * X = B (mod N) 8107 /// 8108 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8109 /// A and B isn't important. 8110 /// 8111 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8112 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8113 ScalarEvolution &SE) { 8114 uint32_t BW = A.getBitWidth(); 8115 assert(BW == SE.getTypeSizeInBits(B->getType())); 8116 assert(A != 0 && "A must be non-zero."); 8117 8118 // 1. D = gcd(A, N) 8119 // 8120 // The gcd of A and N may have only one prime factor: 2. The number of 8121 // trailing zeros in A is its multiplicity 8122 uint32_t Mult2 = A.countTrailingZeros(); 8123 // D = 2^Mult2 8124 8125 // 2. 
Check if B is divisible by D.
8126 //
8127 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
8128 // is not less than multiplicity of this prime factor for D.
8129 if (SE.GetMinTrailingZeros(B) < Mult2)
8130 return SE.getCouldNotCompute();
8131
8132 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
8133 // modulo (N / D).
8134 //
8135 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
8136 // (N / D) in general. The inverse itself always fits into BW bits, though,
8137 // so we immediately truncate it.
8138 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
8139 APInt Mod(BW + 1, 0);
8140 Mod.setBit(BW - Mult2); // Mod = N / D
8141 APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
8142
8143 // 4. Compute the minimum unsigned root of the equation:
8144 // I * (B / D) mod (N / D)
8145 // To simplify the computation, we factor out the divide by D:
8146 // (I * B mod N) / D
8147 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
8148 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
8149 }
8150
8151 /// Find the roots of the quadratic equation for the given quadratic chrec
8152 /// {L,+,M,+,N}. This returns either the two roots (which might be the same) or
8153 /// None if the roots cannot be computed (e.g. non-constant coefficients).
8154 static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
8155 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
8156 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
8157 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
8158 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
8159 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
8160
8161 // We currently can only solve this if the coefficients are constants.
8162 if (!LC || !MC || !NC)
8163 return None;
8164
8165 uint32_t BitWidth = LC->getAPInt().getBitWidth();
8166 const APInt &L = LC->getAPInt();
8167 const APInt &M = MC->getAPInt();
8168 const APInt &N = NC->getAPInt();
8169 APInt Two(BitWidth, 2);
8170
8171 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
8172
8173 // The A coefficient is N/2
8174 APInt A = N.sdiv(Two);
8175
8176 // The B coefficient is M-N/2
8177 APInt B = M;
8178 B -= A; // A is the same as N/2.
8179
8180 // The C coefficient is L.
8181 const APInt& C = L;
8182
8183 // Compute the B^2-4AC term.
8184 APInt SqrtTerm = B;
8185 SqrtTerm *= B;
8186 SqrtTerm -= 4 * (A * C);
8187
8188 if (SqrtTerm.isNegative()) {
8189 // The loop is provably infinite.
8190 return None;
8191 }
8192
8193 // Compute sqrt(B^2-4AC). This is guaranteed to be the nearest
8194 // integer value or else APInt::sqrt() will assert.
8195 APInt SqrtVal = SqrtTerm.sqrt();
8196
8197 // Compute the two solutions for the quadratic formula.
8198 // The divisions must be performed as signed divisions.
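// Worked example: the chrec {-8,+,3,+,2} describes f(i) = i^2 + 2*i - 8, so
// A = 1, B = 2, C = -8. Then B^2-4AC = 36, its square root is 6, and the two
// candidate roots computed below are (-2 + 6)/2 = 2 and (-2 - 6)/2 = -4.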
8199 APInt NegB = -std::move(B);
8200 APInt TwoA = std::move(A);
8201 TwoA <<= 1;
8202 if (TwoA.isNullValue())
8203 return None;
8204
8205 LLVMContext &Context = SE.getContext();
8206
8207 ConstantInt *Solution1 =
8208 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
8209 ConstantInt *Solution2 =
8210 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
8211
8212 return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
8213 cast<SCEVConstant>(SE.getConstant(Solution2)));
8214 }
8215
8216 ScalarEvolution::ExitLimit
8217 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
8218 bool AllowPredicates) {
8219
8220 // This is only used for loops with a "x != y" exit test. The exit condition
8221 // is now expressed as a single expression, V = x-y. So the exit test is
8222 // effectively V != 0. We know, and take advantage of, the fact that this
8223 // expression is only ever used in a comparison-with-zero context.
8224
8225 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8226 // If the value is a constant.
8227 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
8228 // If the value is already zero, the branch will execute zero times.
8229 if (C->getValue()->isZero()) return C;
8230 return getCouldNotCompute(); // Otherwise it will loop infinitely.
8231 }
8232
8233 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
8234 if (!AddRec && AllowPredicates)
8235 // Try to make this an AddRec using runtime tests, in the first X
8236 // iterations of this loop, where X is the SCEV expression found by the
8237 // algorithm below.
8238 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
8239
8240 if (!AddRec || AddRec->getLoop() != L)
8241 return getCouldNotCompute();
8242
8243 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
8244 // the quadratic equation to solve it.
8245 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
8246 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
8247 const SCEVConstant *R1 = Roots->first;
8248 const SCEVConstant *R2 = Roots->second;
8249 // Pick the smallest positive root value.
8250 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
8251 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
8252 if (!CB->getZExtValue())
8253 std::swap(R1, R2); // R1 is the minimum root now.
8254
8255 // We can only use this value if the chrec ends up with an exact zero
8256 // value at this index. When solving for "X*X != 5", for example, we
8257 // should not accept a root of 2.
8258 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
8259 if (Val->isZero())
8260 // We found a quadratic root!
8261 return ExitLimit(R1, R1, false, Predicates);
8262 }
8263 }
8264 return getCouldNotCompute();
8265 }
8266
8267 // Otherwise we can only handle this if it is affine.
8268 if (!AddRec->isAffine())
8269 return getCouldNotCompute();
8270
8271 // If this is an affine expression, the execution count of this branch is
8272 // the minimum unsigned root of the following equation:
8273 //
8274 // Start + Step*N = 0 (mod 2^BW)
8275 //
8276 // equivalent to:
8277 //
8278 // Step*N = -Start (mod 2^BW)
8279 //
8280 // where BW is the common bit width of Start and Step.
8281
8282 // Get the initial value for the loop.
8283 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
8284 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
8285
8286 // For now we handle only constant steps.
8287 // 8288 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8289 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8290 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8291 // We have not yet seen any such cases. 8292 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8293 if (!StepC || StepC->getValue()->isZero()) 8294 return getCouldNotCompute(); 8295 8296 // For positive steps (counting up until unsigned overflow): 8297 // N = -Start/Step (as unsigned) 8298 // For negative steps (counting down to zero): 8299 // N = Start/-Step 8300 // First compute the unsigned distance from zero in the direction of Step. 8301 bool CountDown = StepC->getAPInt().isNegative(); 8302 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8303 8304 // Handle unitary steps, which cannot wraparound. 8305 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8306 // N = Distance (as unsigned) 8307 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8308 APInt MaxBECount = getUnsignedRangeMax(Distance); 8309 8310 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8311 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8312 // case, and see if we can improve the bound. 8313 // 8314 // Explicitly handling this here is necessary because getUnsignedRange 8315 // isn't context-sensitive; it doesn't know that we only care about the 8316 // range inside the loop. 8317 const SCEV *Zero = getZero(Distance->getType()); 8318 const SCEV *One = getOne(Distance->getType()); 8319 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8320 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8321 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8322 // as "unsigned_max(Distance + 1) - 1". 8323 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8324 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8325 } 8326 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8327 } 8328 8329 // If the condition controls loop exit (the loop exits only if the expression 8330 // is true) and the addition is no-wrap we can use unsigned divide to 8331 // compute the backedge count. In this case, the step may not divide the 8332 // distance, but we don't care because if the condition is "missed" the loop 8333 // will have undefined behavior due to wrapping. 8334 if (ControlsExit && AddRec->hasNoSelfWrap() && 8335 loopHasNoAbnormalExits(AddRec->getLoop())) { 8336 const SCEV *Exact = 8337 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8338 const SCEV *Max = 8339 Exact == getCouldNotCompute() 8340 ? Exact 8341 : getConstant(getUnsignedRangeMax(Exact)); 8342 return ExitLimit(Exact, Max, false, Predicates); 8343 } 8344 8345 // Solve the general equation. 8346 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8347 getNegativeSCEV(Start), *this); 8348 const SCEV *M = E == getCouldNotCompute() 8349 ? E 8350 : getConstant(getUnsignedRangeMax(E)); 8351 return ExitLimit(E, M, false, Predicates); 8352 } 8353 8354 ScalarEvolution::ExitLimit 8355 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8356 // Loops that look like: while (X == 0) are very strange indeed. We don't 8357 // handle them yet except for the trivial case. This could be expanded in the 8358 // future as needed. 8359 8360 // If the value is a constant, check to see if it is known to be non-zero 8361 // already. 
If so, the backedge will execute zero times. 8362 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8363 if (!C->getValue()->isZero()) 8364 return getZero(C->getType()); 8365 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8366 } 8367 8368 // We could implement others, but I really doubt anyone writes loops like 8369 // this, and if they did, they would already be constant folded. 8370 return getCouldNotCompute(); 8371 } 8372 8373 std::pair<BasicBlock *, BasicBlock *> 8374 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8375 // If the block has a unique predecessor, then there is no path from the 8376 // predecessor to the block that does not go through the direct edge 8377 // from the predecessor to the block. 8378 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8379 return {Pred, BB}; 8380 8381 // A loop's header is defined to be a block that dominates the loop. 8382 // If the header has a unique predecessor outside the loop, it must be 8383 // a block that has exactly one successor that can reach the loop. 8384 if (Loop *L = LI.getLoopFor(BB)) 8385 return {L->getLoopPredecessor(), L->getHeader()}; 8386 8387 return {nullptr, nullptr}; 8388 } 8389 8390 /// SCEV structural equivalence is usually sufficient for testing whether two 8391 /// expressions are equal, however for the purposes of looking for a condition 8392 /// guarding a loop, it can be useful to be a little more general, since a 8393 /// front-end may have replicated the controlling expression. 8394 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8395 // Quick check to see if they are the same SCEV. 8396 if (A == B) return true; 8397 8398 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8399 // Not all instructions that are "identical" compute the same value. For 8400 // instance, two distinct alloca instructions allocating the same type are 8401 // identical and do not read memory; but compute distinct values. 8402 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8403 }; 8404 8405 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8406 // two different instructions with the same value. Check for this case. 8407 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8408 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8409 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8410 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8411 if (ComputesEqualValues(AI, BI)) 8412 return true; 8413 8414 // Otherwise assume they may have a different value. 8415 return false; 8416 } 8417 8418 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8419 const SCEV *&LHS, const SCEV *&RHS, 8420 unsigned Depth) { 8421 bool Changed = false; 8422 8423 // If we hit the max recursion limit bail out. 8424 if (Depth >= 3) 8425 return false; 8426 8427 // Canonicalize a constant to the right side. 8428 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8429 // Check for both operands constant. 8430 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8431 if (ConstantExpr::getICmp(Pred, 8432 LHSC->getValue(), 8433 RHSC->getValue())->isNullValue()) 8434 goto trivially_false; 8435 else 8436 goto trivially_true; 8437 } 8438 // Otherwise swap the operands to put the constant on the right. 
8439 std::swap(LHS, RHS); 8440 Pred = ICmpInst::getSwappedPredicate(Pred); 8441 Changed = true; 8442 } 8443 8444 // If we're comparing an addrec with a value which is loop-invariant in the 8445 // addrec's loop, put the addrec on the left. Also make a dominance check, 8446 // as both operands could be addrecs loop-invariant in each other's loop. 8447 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8448 const Loop *L = AR->getLoop(); 8449 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8450 std::swap(LHS, RHS); 8451 Pred = ICmpInst::getSwappedPredicate(Pred); 8452 Changed = true; 8453 } 8454 } 8455 8456 // If there's a constant operand, canonicalize comparisons with boundary 8457 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8458 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8459 const APInt &RA = RC->getAPInt(); 8460 8461 bool SimplifiedByConstantRange = false; 8462 8463 if (!ICmpInst::isEquality(Pred)) { 8464 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8465 if (ExactCR.isFullSet()) 8466 goto trivially_true; 8467 else if (ExactCR.isEmptySet()) 8468 goto trivially_false; 8469 8470 APInt NewRHS; 8471 CmpInst::Predicate NewPred; 8472 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8473 ICmpInst::isEquality(NewPred)) { 8474 // We were able to convert an inequality to an equality. 8475 Pred = NewPred; 8476 RHS = getConstant(NewRHS); 8477 Changed = SimplifiedByConstantRange = true; 8478 } 8479 } 8480 8481 if (!SimplifiedByConstantRange) { 8482 switch (Pred) { 8483 default: 8484 break; 8485 case ICmpInst::ICMP_EQ: 8486 case ICmpInst::ICMP_NE: 8487 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8488 if (!RA) 8489 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8490 if (const SCEVMulExpr *ME = 8491 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8492 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8493 ME->getOperand(0)->isAllOnesValue()) { 8494 RHS = AE->getOperand(1); 8495 LHS = ME->getOperand(1); 8496 Changed = true; 8497 } 8498 break; 8499 8500 8501 // The "Should have been caught earlier!" messages refer to the fact 8502 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8503 // should have fired on the corresponding cases, and canonicalized the 8504 // check to trivially_true or trivially_false. 8505 8506 case ICmpInst::ICMP_UGE: 8507 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8508 Pred = ICmpInst::ICMP_UGT; 8509 RHS = getConstant(RA - 1); 8510 Changed = true; 8511 break; 8512 case ICmpInst::ICMP_ULE: 8513 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8514 Pred = ICmpInst::ICMP_ULT; 8515 RHS = getConstant(RA + 1); 8516 Changed = true; 8517 break; 8518 case ICmpInst::ICMP_SGE: 8519 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8520 Pred = ICmpInst::ICMP_SGT; 8521 RHS = getConstant(RA - 1); 8522 Changed = true; 8523 break; 8524 case ICmpInst::ICMP_SLE: 8525 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8526 Pred = ICmpInst::ICMP_SLT; 8527 RHS = getConstant(RA + 1); 8528 Changed = true; 8529 break; 8530 } 8531 } 8532 } 8533 8534 // Check for obvious equality. 
8535 if (HasSameValue(LHS, RHS)) { 8536 if (ICmpInst::isTrueWhenEqual(Pred)) 8537 goto trivially_true; 8538 if (ICmpInst::isFalseWhenEqual(Pred)) 8539 goto trivially_false; 8540 } 8541 8542 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8543 // adding or subtracting 1 from one of the operands. 8544 switch (Pred) { 8545 case ICmpInst::ICMP_SLE: 8546 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8547 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8548 SCEV::FlagNSW); 8549 Pred = ICmpInst::ICMP_SLT; 8550 Changed = true; 8551 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8552 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8553 SCEV::FlagNSW); 8554 Pred = ICmpInst::ICMP_SLT; 8555 Changed = true; 8556 } 8557 break; 8558 case ICmpInst::ICMP_SGE: 8559 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8560 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8561 SCEV::FlagNSW); 8562 Pred = ICmpInst::ICMP_SGT; 8563 Changed = true; 8564 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8565 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8566 SCEV::FlagNSW); 8567 Pred = ICmpInst::ICMP_SGT; 8568 Changed = true; 8569 } 8570 break; 8571 case ICmpInst::ICMP_ULE: 8572 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8573 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8574 SCEV::FlagNUW); 8575 Pred = ICmpInst::ICMP_ULT; 8576 Changed = true; 8577 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8578 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8579 Pred = ICmpInst::ICMP_ULT; 8580 Changed = true; 8581 } 8582 break; 8583 case ICmpInst::ICMP_UGE: 8584 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8585 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8586 Pred = ICmpInst::ICMP_UGT; 8587 Changed = true; 8588 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8589 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8590 SCEV::FlagNUW); 8591 Pred = ICmpInst::ICMP_UGT; 8592 Changed = true; 8593 } 8594 break; 8595 default: 8596 break; 8597 } 8598 8599 // TODO: More simplifications are possible here. 8600 8601 // Recursively simplify until we either hit a recursion limit or nothing 8602 // changes. 8603 if (Changed) 8604 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8605 8606 return Changed; 8607 8608 trivially_true: 8609 // Return 0 == 0. 8610 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8611 Pred = ICmpInst::ICMP_EQ; 8612 return true; 8613 8614 trivially_false: 8615 // Return 0 != 0. 8616 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8617 Pred = ICmpInst::ICMP_NE; 8618 return true; 8619 } 8620 8621 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 8622 return getSignedRangeMax(S).isNegative(); 8623 } 8624 8625 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 8626 return getSignedRangeMin(S).isStrictlyPositive(); 8627 } 8628 8629 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 8630 return !getSignedRangeMin(S).isNegative(); 8631 } 8632 8633 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 8634 return !getSignedRangeMax(S).isStrictlyPositive(); 8635 } 8636 8637 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 8638 return isKnownNegative(S) || isKnownPositive(S); 8639 } 8640 8641 std::pair<const SCEV *, const SCEV *> 8642 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 8643 // Compute SCEV on entry of loop L. 
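// For example (illustrative), for S = {0,+,1}<L> this yields Start = 0 and
// PostInc = {1,+,1}<L>: the value of S on entry to L and the value of S after
// the first increment, respectively.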
8644 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 8645 if (Start == getCouldNotCompute()) 8646 return { Start, Start }; 8647 // Compute post increment SCEV for loop L. 8648 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 8649 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 8650 return { Start, PostInc }; 8651 } 8652 8653 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 8654 const SCEV *LHS, const SCEV *RHS) { 8655 // First collect all loops. 8656 SmallPtrSet<const Loop *, 8> LoopsUsed; 8657 getUsedLoops(LHS, LoopsUsed); 8658 getUsedLoops(RHS, LoopsUsed); 8659 8660 if (LoopsUsed.empty()) 8661 return false; 8662 8663 // Domination relationship must be a linear order on collected loops. 8664 #ifndef NDEBUG 8665 for (auto *L1 : LoopsUsed) 8666 for (auto *L2 : LoopsUsed) 8667 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 8668 DT.dominates(L2->getHeader(), L1->getHeader())) && 8669 "Domination relationship is not a linear order"); 8670 #endif 8671 8672 const Loop *MDL = 8673 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 8674 [&](const Loop *L1, const Loop *L2) { 8675 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 8676 }); 8677 8678 // Get init and post increment value for LHS. 8679 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 8680 // if LHS contains unknown non-invariant SCEV then bail out. 8681 if (SplitLHS.first == getCouldNotCompute()) 8682 return false; 8683 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 8684 // Get init and post increment value for RHS. 8685 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 8686 // if RHS contains unknown non-invariant SCEV then bail out. 8687 if (SplitRHS.first == getCouldNotCompute()) 8688 return false; 8689 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 8690 // It is possible that init SCEV contains an invariant load but it does 8691 // not dominate MDL and is not available at MDL loop entry, so we should 8692 // check it here. 8693 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 8694 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 8695 return false; 8696 8697 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) && 8698 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 8699 SplitRHS.second); 8700 } 8701 8702 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 8703 const SCEV *LHS, const SCEV *RHS) { 8704 // Canonicalize the inputs first. 8705 (void)SimplifyICmpOperands(Pred, LHS, RHS); 8706 8707 if (isKnownViaInduction(Pred, LHS, RHS)) 8708 return true; 8709 8710 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 8711 return true; 8712 8713 // Otherwise see what can be done with some simple reasoning. 
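// (i.e. the constant-range and no-overflow based checks, such as
// isKnownPredicateViaConstantRanges and isKnownPredicateViaNoOverflow below.)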
8714 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 8715 } 8716 8717 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 8718 const SCEVAddRecExpr *LHS, 8719 const SCEV *RHS) { 8720 const Loop *L = LHS->getLoop(); 8721 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 8722 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 8723 } 8724 8725 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 8726 ICmpInst::Predicate Pred, 8727 bool &Increasing) { 8728 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 8729 8730 #ifndef NDEBUG 8731 // Verify an invariant: inverting the predicate should turn a monotonically 8732 // increasing change to a monotonically decreasing one, and vice versa. 8733 bool IncreasingSwapped; 8734 bool ResultSwapped = isMonotonicPredicateImpl( 8735 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 8736 8737 assert(Result == ResultSwapped && "should be able to analyze both!"); 8738 if (ResultSwapped) 8739 assert(Increasing == !IncreasingSwapped && 8740 "monotonicity should flip as we flip the predicate"); 8741 #endif 8742 8743 return Result; 8744 } 8745 8746 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 8747 ICmpInst::Predicate Pred, 8748 bool &Increasing) { 8749 8750 // A zero step value for LHS means the induction variable is essentially a 8751 // loop invariant value. We don't really depend on the predicate actually 8752 // flipping from false to true (for increasing predicates, and the other way 8753 // around for decreasing predicates), all we care about is that *if* the 8754 // predicate changes then it only changes from false to true. 8755 // 8756 // A zero step value in itself is not very useful, but there may be places 8757 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 8758 // as general as possible. 8759 8760 switch (Pred) { 8761 default: 8762 return false; // Conservative answer 8763 8764 case ICmpInst::ICMP_UGT: 8765 case ICmpInst::ICMP_UGE: 8766 case ICmpInst::ICMP_ULT: 8767 case ICmpInst::ICMP_ULE: 8768 if (!LHS->hasNoUnsignedWrap()) 8769 return false; 8770 8771 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8772 return true; 8773 8774 case ICmpInst::ICMP_SGT: 8775 case ICmpInst::ICMP_SGE: 8776 case ICmpInst::ICMP_SLT: 8777 case ICmpInst::ICMP_SLE: { 8778 if (!LHS->hasNoSignedWrap()) 8779 return false; 8780 8781 const SCEV *Step = LHS->getStepRecurrence(*this); 8782 8783 if (isKnownNonNegative(Step)) { 8784 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8785 return true; 8786 } 8787 8788 if (isKnownNonPositive(Step)) { 8789 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8790 return true; 8791 } 8792 8793 return false; 8794 } 8795 8796 } 8797 8798 llvm_unreachable("switch has default clause!"); 8799 } 8800 8801 bool ScalarEvolution::isLoopInvariantPredicate( 8802 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8803 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8804 const SCEV *&InvariantRHS) { 8805 8806 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
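// For example, "%n s>= {0,+,1}<L>" with %n loop-invariant in L is rewritten
// as "{0,+,1}<L> s<= %n" before the analysis below.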
8807 if (!isLoopInvariant(RHS, L)) { 8808 if (!isLoopInvariant(LHS, L)) 8809 return false; 8810 8811 std::swap(LHS, RHS); 8812 Pred = ICmpInst::getSwappedPredicate(Pred); 8813 } 8814 8815 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8816 if (!ArLHS || ArLHS->getLoop() != L) 8817 return false; 8818 8819 bool Increasing; 8820 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8821 return false; 8822 8823 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8824 // true as the loop iterates, and the backedge is control dependent on 8825 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8826 // 8827 // * if the predicate was false in the first iteration then the predicate 8828 // is never evaluated again, since the loop exits without taking the 8829 // backedge. 8830 // * if the predicate was true in the first iteration then it will 8831 // continue to be true for all future iterations since it is 8832 // monotonically increasing. 8833 // 8834 // For both the above possibilities, we can replace the loop varying 8835 // predicate with its value on the first iteration of the loop (which is 8836 // loop invariant). 8837 // 8838 // A similar reasoning applies for a monotonically decreasing predicate, by 8839 // replacing true with false and false with true in the above two bullets. 8840 8841 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8842 8843 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8844 return false; 8845 8846 InvariantPred = Pred; 8847 InvariantLHS = ArLHS->getStart(); 8848 InvariantRHS = RHS; 8849 return true; 8850 } 8851 8852 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8853 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8854 if (HasSameValue(LHS, RHS)) 8855 return ICmpInst::isTrueWhenEqual(Pred); 8856 8857 // This code is split out from isKnownPredicate because it is called from 8858 // within isLoopEntryGuardedByCond. 8859 8860 auto CheckRanges = 8861 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 8862 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 8863 .contains(RangeLHS); 8864 }; 8865 8866 // The check at the top of the function catches the case where the values are 8867 // known to be equal. 8868 if (Pred == CmpInst::ICMP_EQ) 8869 return false; 8870 8871 if (Pred == CmpInst::ICMP_NE) 8872 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 8873 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 8874 isKnownNonZero(getMinusSCEV(LHS, RHS)); 8875 8876 if (CmpInst::isSigned(Pred)) 8877 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 8878 8879 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 8880 } 8881 8882 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 8883 const SCEV *LHS, 8884 const SCEV *RHS) { 8885 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 8886 // Return Y via OutY. 
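// For example, matching Result = (X + 4)<nsw> against X with ExpectedFlags =
// SCEV::FlagNSW succeeds and sets OutY to 4.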
8887 auto MatchBinaryAddToConst =
8888 [this](const SCEV *Result, const SCEV *X, APInt &OutY,
8889 SCEV::NoWrapFlags ExpectedFlags) {
8890 const SCEV *NonConstOp, *ConstOp;
8891 SCEV::NoWrapFlags FlagsPresent;
8892
8893 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
8894 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
8895 return false;
8896
8897 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
8898 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
8899 };
8900
8901 APInt C;
8902
8903 switch (Pred) {
8904 default:
8905 break;
8906
8907 case ICmpInst::ICMP_SGE:
8908 std::swap(LHS, RHS);
8909 LLVM_FALLTHROUGH;
8910 case ICmpInst::ICMP_SLE:
8911 // X s<= (X + C)<nsw> if C >= 0
8912 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
8913 return true;
8914
8915 // (X + C)<nsw> s<= X if C <= 0
8916 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
8917 !C.isStrictlyPositive())
8918 return true;
8919 break;
8920
8921 case ICmpInst::ICMP_SGT:
8922 std::swap(LHS, RHS);
8923 LLVM_FALLTHROUGH;
8924 case ICmpInst::ICMP_SLT:
8925 // X s< (X + C)<nsw> if C > 0
8926 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
8927 C.isStrictlyPositive())
8928 return true;
8929
8930 // (X + C)<nsw> s< X if C < 0
8931 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
8932 return true;
8933 break;
8934 }
8935
8936 return false;
8937 }
8938
8939 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
8940 const SCEV *LHS,
8941 const SCEV *RHS) {
8942 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
8943 return false;
8944
8945 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
8946 // on the stack can result in exponential time complexity.
8947 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
8948
8949 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
8950 //
8951 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
8952 // isKnownPredicate. isKnownPredicate is more powerful, but also more
8953 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
8954 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
8955 // use isKnownPredicate later if needed.
8956 return isKnownNonNegative(RHS) &&
8957 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
8958 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
8959 }
8960
8961 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
8962 ICmpInst::Predicate Pred,
8963 const SCEV *LHS, const SCEV *RHS) {
8964 // No need to even try if we know the module has no guards.
8965 if (!HasGuards)
8966 return false;
8967
8968 return any_of(*BB, [&](Instruction &I) {
8969 using namespace llvm::PatternMatch;
8970
8971 Value *Condition;
8972 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
8973 m_Value(Condition))) &&
8974 isImpliedCond(Pred, LHS, RHS, Condition, false);
8975 });
8976 }
8977
8978 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
8979 /// protected by a conditional between LHS and RHS. This is used to
8980 /// eliminate casts.
8981 bool
8982 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
8983 ICmpInst::Predicate Pred,
8984 const SCEV *LHS, const SCEV *RHS) {
8985 // Interpret a null as meaning no loop, where there is obviously no guard
8986 // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.
The dominator tree better agree 9074 // with us on this: 9075 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9076 9077 if (isImpliedCond(Pred, LHS, RHS, Condition, 9078 BB != ContinuePredicate->getSuccessor(0))) 9079 return true; 9080 } 9081 } 9082 9083 return false; 9084 } 9085 9086 bool 9087 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9088 ICmpInst::Predicate Pred, 9089 const SCEV *LHS, const SCEV *RHS) { 9090 // Interpret a null as meaning no loop, where there is obviously no guard 9091 // (interprocedural conditions notwithstanding). 9092 if (!L) return false; 9093 9094 // Both LHS and RHS must be available at loop entry. 9095 assert(isAvailableAtLoopEntry(LHS, L) && 9096 "LHS is not available at Loop Entry"); 9097 assert(isAvailableAtLoopEntry(RHS, L) && 9098 "RHS is not available at Loop Entry"); 9099 9100 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9101 return true; 9102 9103 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9104 // the facts (a >= b && a != b) separately. A typical situation is when the 9105 // non-strict comparison is known from ranges and non-equality is known from 9106 // dominating predicates. If we are proving strict comparison, we always try 9107 // to prove non-equality and non-strict comparison separately. 9108 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9109 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9110 bool ProvedNonStrictComparison = false; 9111 bool ProvedNonEquality = false; 9112 9113 if (ProvingStrictComparison) { 9114 ProvedNonStrictComparison = 9115 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9116 ProvedNonEquality = 9117 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9118 if (ProvedNonStrictComparison && ProvedNonEquality) 9119 return true; 9120 } 9121 9122 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9123 auto ProveViaGuard = [&](BasicBlock *Block) { 9124 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9125 return true; 9126 if (ProvingStrictComparison) { 9127 if (!ProvedNonStrictComparison) 9128 ProvedNonStrictComparison = 9129 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9130 if (!ProvedNonEquality) 9131 ProvedNonEquality = 9132 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9133 if (ProvedNonStrictComparison && ProvedNonEquality) 9134 return true; 9135 } 9136 return false; 9137 }; 9138 9139 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9140 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9141 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9142 return true; 9143 if (ProvingStrictComparison) { 9144 if (!ProvedNonStrictComparison) 9145 ProvedNonStrictComparison = 9146 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9147 if (!ProvedNonEquality) 9148 ProvedNonEquality = 9149 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9150 if (ProvedNonStrictComparison && ProvedNonEquality) 9151 return true; 9152 } 9153 return false; 9154 }; 9155 9156 // Starting at the loop predecessor, climb up the predecessor chain, as long 9157 // as there are predecessors that can be found that have unique successors 9158 // leading to the original header. 
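  // For illustration, on a hypothetical CFG (the names are made up):
  //
  //   entry:
  //     br i1 %outer.cond, label %guarded, label %exit
  //   guarded:
  //     br i1 %inner.cond, label %preheader, label %exit
  //   preheader:
  //     br label %header
  //
  // the climb visits preheader, guarded and entry in turn, and may use the
  // conditions %inner.cond and %outer.cond to try to prove the predicate.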
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
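  // E.g. (illustrative): if LHS/RHS are i32 while FoundLHS/FoundRHS are i64,
  // widen LHS and RHS to i64, sign- or zero-extending to match the signedness
  // of the predicate, so that both implications speak about a single type.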
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
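      // Illustrative instance (not from the original comment): with i8
      // values, Pred = ICMP_SGE, V known to lie in [5, 100) and a guard
      // V != 5, the range sharpens to [6, 100), i.e. V s>= 6.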
9327 9328 APInt SharperMin = Min + 1; 9329 9330 switch (Pred) { 9331 case ICmpInst::ICMP_SGE: 9332 case ICmpInst::ICMP_UGE: 9333 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9334 // RHS, we're done. 9335 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9336 getConstant(SharperMin))) 9337 return true; 9338 LLVM_FALLTHROUGH; 9339 9340 case ICmpInst::ICMP_SGT: 9341 case ICmpInst::ICMP_UGT: 9342 // We know from the range information that (V `Pred` Min || 9343 // V == Min). We know from the guarding condition that !(V 9344 // == Min). This gives us 9345 // 9346 // V `Pred` Min || V == Min && !(V == Min) 9347 // => V `Pred` Min 9348 // 9349 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9350 9351 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9352 return true; 9353 LLVM_FALLTHROUGH; 9354 9355 default: 9356 // No change 9357 break; 9358 } 9359 } 9360 } 9361 9362 // Check whether the actual condition is beyond sufficient. 9363 if (FoundPred == ICmpInst::ICMP_EQ) 9364 if (ICmpInst::isTrueWhenEqual(Pred)) 9365 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9366 return true; 9367 if (Pred == ICmpInst::ICMP_NE) 9368 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9369 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9370 return true; 9371 9372 // Otherwise assume the worst. 9373 return false; 9374 } 9375 9376 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9377 const SCEV *&L, const SCEV *&R, 9378 SCEV::NoWrapFlags &Flags) { 9379 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9380 if (!AE || AE->getNumOperands() != 2) 9381 return false; 9382 9383 L = AE->getOperand(0); 9384 R = AE->getOperand(1); 9385 Flags = AE->getNoWrapFlags(); 9386 return true; 9387 } 9388 9389 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9390 const SCEV *Less) { 9391 // We avoid subtracting expressions here because this function is usually 9392 // fairly deep in the call stack (i.e. is called many times). 9393 9394 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9395 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9396 const auto *MAR = cast<SCEVAddRecExpr>(More); 9397 9398 if (LAR->getLoop() != MAR->getLoop()) 9399 return None; 9400 9401 // We look at affine expressions only; not for correctness but to keep 9402 // getStepRecurrence cheap. 9403 if (!LAR->isAffine() || !MAR->isAffine()) 9404 return None; 9405 9406 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9407 return None; 9408 9409 Less = LAR->getStart(); 9410 More = MAR->getStart(); 9411 9412 // fall through 9413 } 9414 9415 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9416 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9417 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9418 return M - L; 9419 } 9420 9421 SCEV::NoWrapFlags Flags; 9422 const SCEV *LLess = nullptr, *RLess = nullptr; 9423 const SCEV *LMore = nullptr, *RMore = nullptr; 9424 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9425 // Compare (X + C1) vs X. 9426 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9427 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9428 if (RLess == More) 9429 return -(C1->getAPInt()); 9430 9431 // Compare X vs (X + C2). 9432 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9433 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9434 if (RMore == Less) 9435 return C2->getAPInt(); 9436 9437 // Compare (X + C1) vs (X + C2). 
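  // E.g. (illustrative): Less = (%x + 3) and More = (%x + 7) split as C1 = 3
  // and C2 = 7, giving a constant difference of C2 - C1 = 4.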
9438 if (C1 && C2 && RLess == RMore) 9439 return C2->getAPInt() - C1->getAPInt(); 9440 9441 return None; 9442 } 9443 9444 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9445 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9446 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9447 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9448 return false; 9449 9450 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9451 if (!AddRecLHS) 9452 return false; 9453 9454 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9455 if (!AddRecFoundLHS) 9456 return false; 9457 9458 // We'd like to let SCEV reason about control dependencies, so we constrain 9459 // both the inequalities to be about add recurrences on the same loop. This 9460 // way we can use isLoopEntryGuardedByCond later. 9461 9462 const Loop *L = AddRecFoundLHS->getLoop(); 9463 if (L != AddRecLHS->getLoop()) 9464 return false; 9465 9466 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9467 // 9468 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9469 // ... (2) 9470 // 9471 // Informal proof for (2), assuming (1) [*]: 9472 // 9473 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9474 // 9475 // Then 9476 // 9477 // FoundLHS s< FoundRHS s< INT_MIN - C 9478 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9479 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9480 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9481 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9482 // <=> FoundLHS + C s< FoundRHS + C 9483 // 9484 // [*]: (1) can be proved by ruling out overflow. 9485 // 9486 // [**]: This can be proved by analyzing all the four possibilities: 9487 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9488 // (A s>= 0, B s>= 0). 9489 // 9490 // Note: 9491 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9492 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9493 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9494 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9495 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9496 // C)". 9497 9498 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9499 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9500 if (!LDiff || !RDiff || *LDiff != *RDiff) 9501 return false; 9502 9503 if (LDiff->isMinValue()) 9504 return true; 9505 9506 APInt FoundRHSLimit; 9507 9508 if (Pred == CmpInst::ICMP_ULT) { 9509 FoundRHSLimit = -(*RDiff); 9510 } else { 9511 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9512 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9513 } 9514 9515 // Try to prove (1) or (2), as needed. 
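  // (Reading this for Pred == ICMP_ULT: we ask whether loop entry is guarded
  // by FoundRHS u< -RDiff, which is exactly the antecedent of rule (1) above
  // with C = RDiff. The SLT case matches rule (2) the same way.)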
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// If Expr computes ~A, return A, otherwise return nullptr.
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
                              const SCEV *Candidate) {
  const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  if (!MaxExpr) return false;

  return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}

/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
                              const SCEV *MaybeMinExpr,
                              const SCEV *Candidate) {
  const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  if (!MaybeMaxExpr)
    return false;

  return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
    SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
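/// E.g. (illustrative): smin(A, B) s<= A holds for any A and B, as does
/// A u<= umax(A, B). Note there is no dedicated min expression here: a min
/// is represented as ~smax(~A, ~B), which is what MatchNotExpr unwraps.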
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
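    // (S1 >= 0 is phrased as S1 > -1 below; together with S2 > RHS and the
    // no-signed-wrap add this yields S1 + S2 > RHS.)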
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop. Such a
      // trip count would get cached as SCEVCouldNotCompute to avoid infinite
      // recursion. To avoid this, we only want to create SCEVs that are
      // constants in this section. So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
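      // A worked instance (illustrative): FoundRHS = -2 and Denominator = 3
      // satisfy FoundRHS > -1 - Denominator = -4; then FoundLHS >= -1, so
      // LHS = FoundLHS /s 3 is either 0 or positive, and LHS > RHS holds for
      // any RHS < 0.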
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
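  // E.g. (illustrative): an antecedent of `FoundLHS u< 8` gives
  // FoundLHSRange = [0, 8); with Addend = 2 the derived LHSRange below
  // becomes [2, 10).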
9856 ConstantRange FoundLHSRange = 9857 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9858 9859 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9860 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9861 9862 // We can also compute the range of values for `LHS` that satisfy the 9863 // consequent, "`LHS` `Pred` `RHS`": 9864 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9865 ConstantRange SatisfyingLHSRange = 9866 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9867 9868 // The antecedent implies the consequent if every value of `LHS` that 9869 // satisfies the antecedent also satisfies the consequent. 9870 return SatisfyingLHSRange.contains(LHSRange); 9871 } 9872 9873 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9874 bool IsSigned, bool NoWrap) { 9875 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9876 9877 if (NoWrap) return false; 9878 9879 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9880 const SCEV *One = getOne(Stride->getType()); 9881 9882 if (IsSigned) { 9883 APInt MaxRHS = getSignedRangeMax(RHS); 9884 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9885 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9886 9887 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9888 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9889 } 9890 9891 APInt MaxRHS = getUnsignedRangeMax(RHS); 9892 APInt MaxValue = APInt::getMaxValue(BitWidth); 9893 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9894 9895 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9896 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9897 } 9898 9899 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9900 bool IsSigned, bool NoWrap) { 9901 if (NoWrap) return false; 9902 9903 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9904 const SCEV *One = getOne(Stride->getType()); 9905 9906 if (IsSigned) { 9907 APInt MinRHS = getSignedRangeMin(RHS); 9908 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9909 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9910 9911 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9912 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9913 } 9914 9915 APInt MinRHS = getUnsignedRangeMin(RHS); 9916 APInt MinValue = APInt::getMinValue(BitWidth); 9917 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9918 9919 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9920 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9921 } 9922 9923 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9924 bool Equality) { 9925 const SCEV *One = getOne(Step->getType()); 9926 Delta = Equality ? getAddExpr(Delta, Step) 9927 : getAddExpr(Delta, getMinusSCEV(Step, One)); 9928 return getUDivExpr(Delta, Step); 9929 } 9930 9931 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 9932 const SCEV *Stride, 9933 const SCEV *End, 9934 unsigned BitWidth, 9935 bool IsSigned) { 9936 9937 assert(!isKnownNonPositive(Stride) && 9938 "Stride is expected strictly positive!"); 9939 // Calculate the maximum backedge count based on the range of values 9940 // permitted by Start, End, and Stride. 9941 const SCEV *MaxBECount; 9942 APInt MinStart = 9943 IsSigned ? 
                 getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    //   i = start
    //   do {
    //     A[i] = i;
    //     i += s;
    //   } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    //   (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single-exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
10027 // 10028 // Precondition b) implies that the unknown stride cannot be zero otherwise 10029 // we have UB. 10030 // 10031 // The positive stride case is the same as isKnownPositive(Stride) returning 10032 // true (original behavior of the function). 10033 // 10034 // We want to make sure that the stride is truly unknown as there are edge 10035 // cases where ScalarEvolution propagates no wrap flags to the 10036 // post-increment/decrement IV even though the increment/decrement operation 10037 // itself is wrapping. The computed backedge taken count may be wrong in 10038 // such cases. This is prevented by checking that the stride is not known to 10039 // be either positive or non-positive. For example, no wrap flags are 10040 // propagated to the post-increment IV of this loop with a trip count of 2 - 10041 // 10042 // unsigned char i; 10043 // for(i=127; i<128; i+=129) 10044 // A[i] = i; 10045 // 10046 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10047 !loopHasNoSideEffects(L)) 10048 return getCouldNotCompute(); 10049 } else if (!Stride->isOne() && 10050 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10051 // Avoid proven overflow cases: this will ensure that the backedge taken 10052 // count will not generate any unsigned overflow. Relaxed no-overflow 10053 // conditions exploit NoWrapFlags, allowing to optimize in presence of 10054 // undefined behaviors like the case of C language. 10055 return getCouldNotCompute(); 10056 10057 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10058 : ICmpInst::ICMP_ULT; 10059 const SCEV *Start = IV->getStart(); 10060 const SCEV *End = RHS; 10061 // When the RHS is not invariant, we do not know the end bound of the loop and 10062 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10063 // calculate the MaxBECount, given the start, stride and max value for the end 10064 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10065 // checked above). 10066 if (!isLoopInvariant(RHS, L)) { 10067 const SCEV *MaxBECount = computeMaxBECountForLT( 10068 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10069 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10070 false /*MaxOrZero*/, Predicates); 10071 } 10072 // If the backedge is taken at least once, then it will be taken 10073 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10074 // is the LHS value of the less-than comparison the first time it is evaluated 10075 // and End is the RHS. 10076 const SCEV *BECountIfBackedgeTaken = 10077 computeBECount(getMinusSCEV(End, Start), Stride, false); 10078 // If the loop entry is guarded by the result of the backedge test of the 10079 // first loop iteration, then we know the backedge will be taken at least 10080 // once and so the backedge taken count is as above. If not then we use the 10081 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10082 // as if the backedge is taken at least once max(End,Start) is End and so the 10083 // result is as above, and if not max(End,Start) is Start so we get a backedge 10084 // count of zero. 10085 const SCEV *BECount; 10086 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10087 BECount = BECountIfBackedgeTaken; 10088 else { 10089 End = IsSigned ? 
getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10090 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10091 } 10092 10093 const SCEV *MaxBECount; 10094 bool MaxOrZero = false; 10095 if (isa<SCEVConstant>(BECount)) 10096 MaxBECount = BECount; 10097 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10098 // If we know exactly how many times the backedge will be taken if it's 10099 // taken at least once, then the backedge count will either be that or 10100 // zero. 10101 MaxBECount = BECountIfBackedgeTaken; 10102 MaxOrZero = true; 10103 } else { 10104 MaxBECount = computeMaxBECountForLT( 10105 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10106 } 10107 10108 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10109 !isa<SCEVCouldNotCompute>(BECount)) 10110 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10111 10112 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10113 } 10114 10115 ScalarEvolution::ExitLimit 10116 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10117 const Loop *L, bool IsSigned, 10118 bool ControlsExit, bool AllowPredicates) { 10119 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10120 // We handle only IV > Invariant 10121 if (!isLoopInvariant(RHS, L)) 10122 return getCouldNotCompute(); 10123 10124 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10125 if (!IV && AllowPredicates) 10126 // Try to make this an AddRec using runtime tests, in the first X 10127 // iterations of this loop, where X is the SCEV expression found by the 10128 // algorithm below. 10129 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10130 10131 // Avoid weird loops 10132 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10133 return getCouldNotCompute(); 10134 10135 bool NoWrap = ControlsExit && 10136 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10137 10138 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 10139 10140 // Avoid negative or zero stride values 10141 if (!isKnownPositive(Stride)) 10142 return getCouldNotCompute(); 10143 10144 // Avoid proven overflow cases: this will ensure that the backedge taken count 10145 // will not generate any unsigned overflow. Relaxed no-overflow conditions 10146 // exploit NoWrapFlags, allowing to optimize in presence of undefined 10147 // behaviors like the case of C language. 10148 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 10149 return getCouldNotCompute(); 10150 10151 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 10152 : ICmpInst::ICMP_UGT; 10153 10154 const SCEV *Start = IV->getStart(); 10155 const SCEV *End = RHS; 10156 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) 10157 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 10158 10159 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 10160 10161 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 10162 : getUnsignedRangeMax(Start); 10163 10164 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 10165 : getUnsignedRangeMin(Stride); 10166 10167 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 10168 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 10169 : APInt::getMinValue(BitWidth) + (MinStride - 1); 10170 10171 // Although End can be a MIN expression we estimate MinEnd considering only 10172 // the case End = RHS. 
This is safe because in the other case (Start - End) 10173 // is zero, leading to a zero maximum backedge taken count. 10174 APInt MinEnd = 10175 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10176 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10177 10178 10179 const SCEV *MaxBECount = getCouldNotCompute(); 10180 if (isa<SCEVConstant>(BECount)) 10181 MaxBECount = BECount; 10182 else 10183 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 10184 getConstant(MinStride), false); 10185 10186 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10187 MaxBECount = BECount; 10188 10189 return ExitLimit(BECount, MaxBECount, false, Predicates); 10190 } 10191 10192 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10193 ScalarEvolution &SE) const { 10194 if (Range.isFullSet()) // Infinite loop. 10195 return SE.getCouldNotCompute(); 10196 10197 // If the start is a non-zero constant, shift the range to simplify things. 10198 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10199 if (!SC->getValue()->isZero()) { 10200 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10201 Operands[0] = SE.getZero(SC->getType()); 10202 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10203 getNoWrapFlags(FlagNW)); 10204 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10205 return ShiftedAddRec->getNumIterationsInRange( 10206 Range.subtract(SC->getAPInt()), SE); 10207 // This is strange and shouldn't happen. 10208 return SE.getCouldNotCompute(); 10209 } 10210 10211 // The only time we can solve this is when we have all constant indices. 10212 // Otherwise, we cannot determine the overflow conditions. 10213 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10214 return SE.getCouldNotCompute(); 10215 10216 // Okay at this point we know that all elements of the chrec are constants and 10217 // that the start element is zero. 10218 10219 // First check to see if the range contains zero. If not, the first 10220 // iteration exits. 10221 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10222 if (!Range.contains(APInt(BitWidth, 0))) 10223 return SE.getZero(getType()); 10224 10225 if (isAffine()) { 10226 // If this is an affine expression then we have this situation: 10227 // Solve {0,+,A} in Range === Ax in Range 10228 10229 // We know that zero is in the range. If A is positive then we know that 10230 // the upper value of the range must be the first possible exit value. 10231 // If A is negative then the lower of the range is the last possible loop 10232 // value. Also note that we already checked for a full range. 10233 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10234 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10235 10236 // The exit value should be (End+A)/A. 10237 APInt ExitVal = (End + A).udiv(A); 10238 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10239 10240 // Evaluate at the exit value. If we really did fall out of the valid 10241 // range, then we computed our trip count, otherwise wrap around or other 10242 // things must have happened. 10243 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10244 if (Range.contains(Val->getValue())) 10245 return SE.getCouldNotCompute(); // Something strange happened 10246 10247 // Ensure that the previous value is in the range. This is a sanity check. 
10248 assert(Range.contains( 10249 EvaluateConstantChrecAtConstant(this, 10250 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10251 "Linear scev computation is off in a bad way!"); 10252 return SE.getConstant(ExitValue); 10253 } else if (isQuadratic()) { 10254 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 10255 // quadratic equation to solve it. To do this, we must frame our problem in 10256 // terms of figuring out when zero is crossed, instead of when 10257 // Range.getUpper() is crossed. 10258 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 10259 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 10260 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap); 10261 10262 // Next, solve the constructed addrec 10263 if (auto Roots = 10264 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) { 10265 const SCEVConstant *R1 = Roots->first; 10266 const SCEVConstant *R2 = Roots->second; 10267 // Pick the smallest positive root value. 10268 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 10269 ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 10270 if (!CB->getZExtValue()) 10271 std::swap(R1, R2); // R1 is the minimum root now. 10272 10273 // Make sure the root is not off by one. The returned iteration should 10274 // not be in the range, but the previous one should be. When solving 10275 // for "X*X < 5", for example, we should not return a root of 2. 10276 ConstantInt *R1Val = 10277 EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); 10278 if (Range.contains(R1Val->getValue())) { 10279 // The next iteration must be out of the range... 10280 ConstantInt *NextVal = 10281 ConstantInt::get(SE.getContext(), R1->getAPInt() + 1); 10282 10283 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 10284 if (!Range.contains(R1Val->getValue())) 10285 return SE.getConstant(NextVal); 10286 return SE.getCouldNotCompute(); // Something strange happened 10287 } 10288 10289 // If R1 was not in the range, then it is a good return value. Make 10290 // sure that R1-1 WAS in the range though, just in case. 10291 ConstantInt *NextVal = 10292 ConstantInt::get(SE.getContext(), R1->getAPInt() - 1); 10293 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 10294 if (Range.contains(R1Val->getValue())) 10295 return R1; 10296 return SE.getCouldNotCompute(); // Something strange happened 10297 } 10298 } 10299 } 10300 10301 return SE.getCouldNotCompute(); 10302 } 10303 10304 const SCEVAddRecExpr * 10305 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 10306 assert(getNumOperands() > 1 && "AddRec with zero step?"); 10307 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 10308 // but in this case we cannot guarantee that the value returned will be an 10309 // AddRec because SCEV does not have a fixed point where it stops 10310 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 10311 // may happen if we reach arithmetic depth limit while simplifying. So we 10312 // construct the returned value explicitly. 10313 SmallVector<const SCEV *, 3> Ops; 10314 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 10315 // (this + Step) is {A+B,+,B+C,+...,+,N}. 
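  // A concrete instance (illustrative): the step of {0,+,1,+,2} is {1,+,2},
  // and the post-increment expression built here is {1,+,3,+,2}. After n
  // iterations {0,+,1,+,2} evaluates to n^2, and {1,+,3,+,2} to (n+1)^2, as a
  // post-increment value should.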
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when the GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown
// parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;
  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.empty() || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
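  // (Dropping constant factors, e.g. turning 4 * %n into %n, lets terms that
  // differ only by a constant multiplier collapse onto the same candidate
  // dimension before the recursive GCD computation below.)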
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  DEBUG({
    dbgs() << "Terms after sorting and removing constant factors:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. The remainder of the delinearization is the
/// offset at which the array access starts. The SCEV->delinearize algorithm
/// computes the multiples of SCEV coefficients: that is, a pattern matching
/// of subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, Subscripts and Sizes are left empty.
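///
/// As a worked illustration of that division step (assuming the example
/// below): after the strides are normalized by the element size, terms such
/// as (%m * %o) and (%o) remain; dividing (%m * %o) by the smallest term
/// (%o) yields the quotient %m with a zero remainder, which is how %m is
/// recognized as a dimension size.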
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// that walks through all loads and stores of a function asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
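  // (eraseValueFromMap removes the ValueExprMap entry whose key is this very
  // SCEVCallbackVH, destroying the handle; no member may be touched after
  // this point.)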
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this
  // beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
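    // (Dominance is used as a conservative test here: if L's header dominates
    // the AddRec's loop header, the AddRec's loop (in particular, any loop
    // nested inside L) only starts executing after L has been entered, so the
    // recurrence has no single value fixed at L's entry.)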
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S,
                                                 const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
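  // (SCEV objects are uniqued per ScalarEvolution instance, so expressions
  // from this universe must be re-created in SE2 before they can be compared
  // against SE2's freshly computed results.)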
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
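  // (ScalarEvolution holds references into the assumption cache, dominator
  // tree and loop info, so it must be torn down if any of them goes away.)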
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter
    : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(
      const Loop *L, ScalarEvolution &SE,
      SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
      SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
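      // In rewrite-only mode (no NewPreds set to fill), we cannot take on new
      // assumptions; the rewrite may only rely on predicates the caller has
      // already committed to in Pred.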
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached so create a dummy set ID for it.
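/// (Unlike SCEVEqualPredicate and SCEVWrapPredicate, a union predicate is
/// mutable and built incrementally via add(), so it cannot live in the
/// UniquePreds folding set.)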
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
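  // (Each entry in RewriteMap is stamped with the generation in which it was
  // computed; getSCEV treats an entry from an older generation as stale. A
  // wrapped counter would let a stale stamp collide with the current one, so
  // every cached rewrite is refreshed here.)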
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
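
// A typical client-side use of PredicatedScalarEvolution looks roughly like
// the sketch below (illustrative only; SE, L and V stand for a caller's
// ScalarEvolution, Loop and Value):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(V)) {
//     // getAsAddRec may have added predicates; the client must emit runtime
//     // checks for PSE.getUnionPredicate() before relying on AR or on
//     // PSE.getBackedgeTakenCount().
//   }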