//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<Value *> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
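  // (For instance, with a consistent order here, the operand lists of
  // (%a + %b) and (%b + %a) are canonicalized identically, so getAddExpr
  // uniques them to a single SCEV.)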
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    EquivalenceClasses<Value *> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are used
    // by one SCEV, so we can safely sort recs by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
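    // (For example, dividing %n by %n immediately yields quotient 1 and
    // remainder 0, with no visitor traversal.)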
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator; the following visitors therefore have empty
  // implementations.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
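/// For example, BC(It, 2) == It * (It - 1) / 2.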
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
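  // For example, for K = 4: 4! = 24 = 2^3 * 3, so T = 3 and the odd factor
  // K! / 2^T is 3.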
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for the binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
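  // (E.g. truncating the i64 constant 42 to i32 folds to the i32
  // constant 42.)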
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
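// For example, on an i8 recurrence with a step known to lie in [1, 2], the
// limit is SINT_MIN - 2 == 126 with predicate SLT: any value below 126 can
// be incremented by at most 2 without signed overflow.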
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
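// For instance, if AR is {(4 + %x),+,4} and the pre-start %x can be shown not
// to wrap when incremented by 4, this returns (ext(4) + ext(%x)) rather than
// ext(4 + %x).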
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
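    // Continuing the motivating example above: for Start == 1, Step == 4 and
    // Delta == 1, this looks up {0,+,4}; if that recurrence is already known
    // not to wrap and the limit check below succeeds, {1,+,4} inherits the
    // no-wrap flag.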
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1624 // Note that this serves two purposes: It filters out loops that are 1625 // simply not analyzable, and it covers the case where this code is 1626 // being called from within backedge-taken count analysis, such that 1627 // attempting to ask for the backedge-taken count would likely result 1628 // in infinite recursion. In the latter case, the analysis code will 1629 // cope with a conservative value, and it will take care to purge 1630 // that value once it has finished. 1631 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1632 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1633 // Manually compute the final value for AR, checking for 1634 // overflow. 1635 1636 // Check whether the backedge-taken count can be losslessly cast to 1637 // the addrec's type. The count is always unsigned. 1638 const SCEV *CastedMaxBECount = 1639 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1640 const SCEV *RecastedMaxBECount = 1641 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1642 if (MaxBECount == RecastedMaxBECount) { 1643 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1644 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1645 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1646 SCEV::FlagAnyWrap, Depth + 1); 1647 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1648 SCEV::FlagAnyWrap, 1649 Depth + 1), 1650 WideTy, Depth + 1); 1651 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1652 const SCEV *WideMaxBECount = 1653 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1654 const SCEV *OperandExtendedAdd = 1655 getAddExpr(WideStart, 1656 getMulExpr(WideMaxBECount, 1657 getZeroExtendExpr(Step, WideTy, Depth + 1), 1658 SCEV::FlagAnyWrap, Depth + 1), 1659 SCEV::FlagAnyWrap, Depth + 1); 1660 if (ZAdd == OperandExtendedAdd) { 1661 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1662 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1663 // Return the expression with the addrec on the outside. 1664 return getAddRecExpr( 1665 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1666 Depth + 1), 1667 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1668 AR->getNoWrapFlags()); 1669 } 1670 // Similar to above, only this time treat the step value as signed. 1671 // This covers loops that count down. 1672 OperandExtendedAdd = 1673 getAddExpr(WideStart, 1674 getMulExpr(WideMaxBECount, 1675 getSignExtendExpr(Step, WideTy, Depth + 1), 1676 SCEV::FlagAnyWrap, Depth + 1), 1677 SCEV::FlagAnyWrap, Depth + 1); 1678 if (ZAdd == OperandExtendedAdd) { 1679 // Cache knowledge of AR NW, which is propagated to this AddRec. 1680 // Negative step causes unsigned wrap, but it still can't self-wrap. 1681 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1682 // Return the expression with the addrec on the outside. 1683 return getAddRecExpr( 1684 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1685 Depth + 1), 1686 getSignExtendExpr(Step, Ty, Depth + 1), L, 1687 AR->getNoWrapFlags()); 1688 } 1689 } 1690 } 1691 1692 // Normally, in the cases where we can prove no-overflow via a 1693 // backedge guarding condition, we can also compute a backedge 1694 // taken count for the loop. The exceptions are assumptions and 1695 // guards present in the loop -- SCEV is not great at exploiting 1696 // these to compute max backedge taken counts, but can still use 1697 // these to prove lack of overflow. Use this fact to avoid 1698 // doing extra work that may not pay off.
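// As a concrete sketch of the guard reasoning that follows: on i8 with a // known-positive Step == 1, the limit N computed below is 0 - umax(Step), // i.e. 255, so a backedge guarded by `AR ult 255` keeps AR <= 254 whenever // the backedge is taken, and AR + 1 therefore cannot wrap unsigned.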
1699 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1700 !AC.assumptions().empty()) { 1701 // If the backedge is guarded by a comparison with the pre-inc 1702 // value the addrec is safe. Also, if the entry is guarded by 1703 // a comparison with the start value and the backedge is 1704 // guarded by a comparison with the post-inc value, the addrec 1705 // is safe. 1706 if (isKnownPositive(Step)) { 1707 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1708 getUnsignedRangeMax(Step)); 1709 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1710 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1711 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1712 AR->getPostIncExpr(*this), N))) { 1713 // Cache knowledge of AR NUW, which is propagated to this 1714 // AddRec. 1715 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1716 // Return the expression with the addrec on the outside. 1717 return getAddRecExpr( 1718 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1719 Depth + 1), 1720 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1721 AR->getNoWrapFlags()); 1722 } 1723 } else if (isKnownNegative(Step)) { 1724 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1725 getSignedRangeMin(Step)); 1726 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1727 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1728 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1729 AR->getPostIncExpr(*this), N))) { 1730 // Cache knowledge of AR NW, which is propagated to this 1731 // AddRec. Negative step causes unsigned wrap, but it 1732 // still can't self-wrap. 1733 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1734 // Return the expression with the addrec on the outside. 1735 return getAddRecExpr( 1736 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1737 Depth + 1), 1738 getSignExtendExpr(Step, Ty, Depth + 1), L, 1739 AR->getNoWrapFlags()); 1740 } 1741 } 1742 } 1743 1744 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1745 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1746 return getAddRecExpr( 1747 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1748 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1749 } 1750 } 1751 1752 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1753 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1754 if (SA->hasNoUnsignedWrap()) { 1755 // If the addition does not unsign overflow then we can, by definition, 1756 // commute the zero extension with the addition operation. 1757 SmallVector<const SCEV *, 4> Ops; 1758 for (const auto *Op : SA->operands()) 1759 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1760 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1761 } 1762 } 1763 1764 // The cast wasn't folded; create an explicit cast node. 1765 // Recompute the insert position, as it may have been invalidated. 
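// (The recursive getZeroExtendExpr and getAddExpr calls above may have // inserted new nodes into UniqueSCEVs, possibly even the very zext we are // about to create, so both IP and the earlier lookup result are stale here.)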
1766 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1767 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1768 Op, Ty); 1769 UniqueSCEVs.InsertNode(S, IP); 1770 addToLoopUseLists(S); 1771 return S; 1772 } 1773 1774 const SCEV * 1775 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1776 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1777 "This is not an extending conversion!"); 1778 assert(isSCEVable(Ty) && 1779 "This is not a conversion to a SCEVable type!"); 1780 Ty = getEffectiveSCEVType(Ty); 1781 1782 // Fold if the operand is constant. 1783 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1784 return getConstant( 1785 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1786 1787 // sext(sext(x)) --> sext(x) 1788 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1789 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1790 1791 // sext(zext(x)) --> zext(x) 1792 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1793 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1794 1795 // Before doing any expensive analysis, check to see if we've already 1796 // computed a SCEV for this Op and Ty. 1797 FoldingSetNodeID ID; 1798 ID.AddInteger(scSignExtend); 1799 ID.AddPointer(Op); 1800 ID.AddPointer(Ty); 1801 void *IP = nullptr; 1802 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1803 // Limit recursion depth. 1804 if (Depth > MaxExtDepth) { 1805 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1806 Op, Ty); 1807 UniqueSCEVs.InsertNode(S, IP); 1808 addToLoopUseLists(S); 1809 return S; 1810 } 1811 1812 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1813 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1814 // It's possible the bits taken off by the truncate were all sign bits. If 1815 // so, we should be able to simplify this further. 1816 const SCEV *X = ST->getOperand(); 1817 ConstantRange CR = getSignedRange(X); 1818 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1819 unsigned NewBits = getTypeSizeInBits(Ty); 1820 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1821 CR.sextOrTrunc(NewBits))) 1822 return getTruncateOrSignExtend(X, Ty); 1823 } 1824 1825 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1826 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1827 if (SA->getNumOperands() == 2) { 1828 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1829 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1830 if (SMul && SC1) { 1831 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1832 const APInt &C1 = SC1->getAPInt(); 1833 const APInt &C2 = SC2->getAPInt(); 1834 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1835 C2.ugt(C1) && C2.isPowerOf2()) 1836 return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1), 1837 getSignExtendExpr(SMul, Ty, Depth + 1), 1838 SCEV::FlagAnyWrap, Depth + 1); 1839 } 1840 } 1841 } 1842 1843 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1844 if (SA->hasNoSignedWrap()) { 1845 // If the addition does not sign overflow then we can, by definition, 1846 // commute the sign extension with the addition operation. 
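// (Why <nsw> is required, as a worked i8-to-i32 example: for x == 127, // sext(x + 1) on the left is sext(i8 -128) == -128, while sext(x) + 1 on // the right is 128; <nsw> excludes exactly this wrapping case.)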
1847 SmallVector<const SCEV *, 4> Ops; 1848 for (const auto *Op : SA->operands()) 1849 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1850 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1851 } 1852 } 1853 // If the input value is a chrec scev, and we can prove that the value 1854 // did not overflow the old, smaller, value, we can sign extend all of the 1855 // operands (often constants). This allows analysis of something like 1856 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1857 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1858 if (AR->isAffine()) { 1859 const SCEV *Start = AR->getStart(); 1860 const SCEV *Step = AR->getStepRecurrence(*this); 1861 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1862 const Loop *L = AR->getLoop(); 1863 1864 if (!AR->hasNoSignedWrap()) { 1865 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1866 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1867 } 1868 1869 // If we have special knowledge that this addrec won't overflow, 1870 // we don't need to do any further analysis. 1871 if (AR->hasNoSignedWrap()) 1872 return getAddRecExpr( 1873 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1874 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 1875 1876 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1877 // Note that this serves two purposes: It filters out loops that are 1878 // simply not analyzable, and it covers the case where this code is 1879 // being called from within backedge-taken count analysis, such that 1880 // attempting to ask for the backedge-taken count would likely result 1881 // in infinite recursion. In the latter case, the analysis code will 1882 // cope with a conservative value, and it will take care to purge 1883 // that value once it has finished. 1884 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1885 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1886 // Manually compute the final value for AR, checking for 1887 // overflow. 1888 1889 // Check whether the backedge-taken count can be losslessly cast to 1890 // the addrec's type. The count is always unsigned. 1891 const SCEV *CastedMaxBECount = 1892 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1893 const SCEV *RecastedMaxBECount = 1894 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1895 if (MaxBECount == RecastedMaxBECount) { 1896 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1897 // Check whether Start+Step*MaxBECount has no signed overflow. 1898 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 1899 SCEV::FlagAnyWrap, Depth + 1); 1900 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 1901 SCEV::FlagAnyWrap, 1902 Depth + 1), 1903 WideTy, Depth + 1); 1904 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 1905 const SCEV *WideMaxBECount = 1906 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1907 const SCEV *OperandExtendedAdd = 1908 getAddExpr(WideStart, 1909 getMulExpr(WideMaxBECount, 1910 getSignExtendExpr(Step, WideTy, Depth + 1), 1911 SCEV::FlagAnyWrap, Depth + 1), 1912 SCEV::FlagAnyWrap, Depth + 1); 1913 if (SAdd == OperandExtendedAdd) { 1914 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1915 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1916 // Return the expression with the addrec on the outside.
1917 return getAddRecExpr( 1918 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1919 Depth + 1), 1920 getSignExtendExpr(Step, Ty, Depth + 1), L, 1921 AR->getNoWrapFlags()); 1922 } 1923 // Similar to above, only this time treat the step value as unsigned. 1924 // This covers loops that count up with an unsigned step. 1925 OperandExtendedAdd = 1926 getAddExpr(WideStart, 1927 getMulExpr(WideMaxBECount, 1928 getZeroExtendExpr(Step, WideTy, Depth + 1), 1929 SCEV::FlagAnyWrap, Depth + 1), 1930 SCEV::FlagAnyWrap, Depth + 1); 1931 if (SAdd == OperandExtendedAdd) { 1932 // If AR wraps around then 1933 // 1934 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1935 // => SAdd != OperandExtendedAdd 1936 // 1937 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1938 // (SAdd == OperandExtendedAdd => AR is NW) 1939 1940 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1941 1942 // Return the expression with the addrec on the outside. 1943 return getAddRecExpr( 1944 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1945 Depth + 1), 1946 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1947 AR->getNoWrapFlags()); 1948 } 1949 } 1950 } 1951 1952 // Normally, in the cases we can prove no-overflow via a 1953 // backedge guarding condition, we can also compute a backedge 1954 // taken count for the loop. The exceptions are assumptions and 1955 // guards present in the loop -- SCEV is not great at exploiting 1956 // these to compute max backedge taken counts, but can still use 1957 // these to prove lack of overflow. Use this fact to avoid 1958 // doing extra work that may not pay off. 1959 1960 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1961 !AC.assumptions().empty()) { 1962 // If the backedge is guarded by a comparison with the pre-inc 1963 // value the addrec is safe. Also, if the entry is guarded by 1964 // a comparison with the start value and the backedge is 1965 // guarded by a comparison with the post-inc value, the addrec 1966 // is safe. 1967 ICmpInst::Predicate Pred; 1968 const SCEV *OverflowLimit = 1969 getSignedOverflowLimitForStep(Step, &Pred, this); 1970 if (OverflowLimit && 1971 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1972 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1973 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1974 OverflowLimit)))) { 1975 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
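// (An illustrative sketch, assuming an i8 recurrence with Step == 1: // getSignedOverflowLimitForStep yields Pred == slt and OverflowLimit == // 127 - 1 == 126, so a backedge guarded by `AR slt 126` keeps AR <= 125 and // AR + 1 <= 126 < 127 on every iteration, i.e. the increment cannot wrap // signed.)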
1976 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1977 return getAddRecExpr( 1978 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1979 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1980 } 1981 } 1982 1983 // If Start and Step are constants, check if we can apply this 1984 // transformation: 1985 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2 1986 auto *SC1 = dyn_cast<SCEVConstant>(Start); 1987 auto *SC2 = dyn_cast<SCEVConstant>(Step); 1988 if (SC1 && SC2) { 1989 const APInt &C1 = SC1->getAPInt(); 1990 const APInt &C2 = SC2->getAPInt(); 1991 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && 1992 C2.isPowerOf2()) { 1993 Start = getSignExtendExpr(Start, Ty, Depth + 1); 1994 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L, 1995 AR->getNoWrapFlags()); 1996 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1), 1997 SCEV::FlagAnyWrap, Depth + 1); 1998 } 1999 } 2000 2001 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2002 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2003 return getAddRecExpr( 2004 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2005 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2006 } 2007 } 2008 2009 // If the input value is provably positive and we could not simplify 2010 // away the sext, build a zext instead. 2011 if (isKnownNonNegative(Op)) 2012 return getZeroExtendExpr(Op, Ty, Depth + 1); 2013 2014 // The cast wasn't folded; create an explicit cast node. 2015 // Recompute the insert position, as it may have been invalidated. 2016 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2017 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2018 Op, Ty); 2019 UniqueSCEVs.InsertNode(S, IP); 2020 addToLoopUseLists(S); 2021 return S; 2022 } 2023 2024 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2025 /// unspecified bits out to the given type. 2026 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2027 Type *Ty) { 2028 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2029 "This is not an extending conversion!"); 2030 assert(isSCEVable(Ty) && 2031 "This is not a conversion to a SCEVable type!"); 2032 Ty = getEffectiveSCEVType(Ty); 2033 2034 // Sign-extend negative constants. 2035 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2036 if (SC->getAPInt().isNegative()) 2037 return getSignExtendExpr(Op, Ty); 2038 2039 // Peel off a truncate cast. 2040 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2041 const SCEV *NewOp = T->getOperand(); 2042 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2043 return getAnyExtendExpr(NewOp, Ty); 2044 return getTruncateOrNoop(NewOp, Ty); 2045 } 2046 2047 // Next try a zext cast. If the cast is folded, use it. 2048 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2049 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2050 return ZExt; 2051 2052 // Next try a sext cast. If the cast is folded, use it. 2053 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2054 if (!isa<SCEVSignExtendExpr>(SExt)) 2055 return SExt; 2056 2057 // Force the cast to be folded into the operands of an addrec.
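// That is, anyext({S,+,X}) is rewritten to {anyext(S),+,anyext(X)}<nw>, // trading one cast of the whole recurrence for casts of its operands, which // the per-operand getAnyExtendExpr calls below can often fold further.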
2058 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2059 SmallVector<const SCEV *, 4> Ops; 2060 for (const SCEV *Op : AR->operands()) 2061 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2062 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2063 } 2064 2065 // If the expression is obviously signed, use the sext cast value. 2066 if (isa<SCEVSMaxExpr>(Op)) 2067 return SExt; 2068 2069 // Absent any other information, use the zext cast value. 2070 return ZExt; 2071 } 2072 2073 /// Process the given Ops list, which is a list of operands to be added under 2074 /// the given scale, and update the given map. This is a helper function for 2075 /// getAddExpr. As an example of what it does, given a sequence of operands 2076 /// that would form an add expression like this: 2077 /// 2078 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2079 /// 2080 /// where A and B are constants, update the map with these values: 2081 /// 2082 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2083 /// 2084 /// and add 13 + A*B*29 to AccumulatedConstant. 2085 /// This will allow getAddExpr to produce this: 2086 /// 2087 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2088 /// 2089 /// This form often exposes folding opportunities that are hidden in 2090 /// the original operand list. 2091 /// 2092 /// Return true iff it appears that any interesting folding opportunities 2093 /// may be exposed. This helps getAddExpr short-circuit extra work in 2094 /// the common case where no interesting opportunities are present, and 2095 /// is also used as a check to avoid infinite recursion. 2096 static bool 2097 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2098 SmallVectorImpl<const SCEV *> &NewOps, 2099 APInt &AccumulatedConstant, 2100 const SCEV *const *Ops, size_t NumOperands, 2101 const APInt &Scale, 2102 ScalarEvolution &SE) { 2103 bool Interesting = false; 2104 2105 // Iterate over the add operands. They are sorted, with constants first. 2106 unsigned i = 0; 2107 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2108 ++i; 2109 // Pull a buried constant out to the outside. 2110 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2111 Interesting = true; 2112 AccumulatedConstant += Scale * C->getAPInt(); 2113 } 2114 2115 // Next comes everything else. We're especially interested in multiplies 2116 // here, but they're in the middle, so just visit the rest with one loop. 2117 for (; i != NumOperands; ++i) { 2118 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2119 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2120 APInt NewScale = 2121 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2122 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2123 // A multiplication of a constant with another add; recurse. 2124 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2125 Interesting |= 2126 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2127 Add->op_begin(), Add->getNumOperands(), 2128 NewScale, SE); 2129 } else { 2130 // A multiplication of a constant with some other value. Update 2131 // the map.
2132 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2133 const SCEV *Key = SE.getMulExpr(MulOps); 2134 auto Pair = M.insert({Key, NewScale}); 2135 if (Pair.second) { 2136 NewOps.push_back(Pair.first->first); 2137 } else { 2138 Pair.first->second += NewScale; 2139 // The map already had an entry for this value, which may indicate 2140 // a folding opportunity. 2141 Interesting = true; 2142 } 2143 } 2144 } else { 2145 // An ordinary operand. Update the map. 2146 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2147 M.insert({Ops[i], Scale}); 2148 if (Pair.second) { 2149 NewOps.push_back(Pair.first->first); 2150 } else { 2151 Pair.first->second += Scale; 2152 // The map already had an entry for this value, which may indicate 2153 // a folding opportunity. 2154 Interesting = true; 2155 } 2156 } 2157 } 2158 2159 return Interesting; 2160 } 2161 2162 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2163 // `Flags' as can't-wrap behavior. Infer a more aggressive set of 2164 // can't-overflow flags for the operation if possible. 2165 static SCEV::NoWrapFlags 2166 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2167 const SmallVectorImpl<const SCEV *> &Ops, 2168 SCEV::NoWrapFlags Flags) { 2169 using namespace std::placeholders; 2170 2171 using OBO = OverflowingBinaryOperator; 2172 2173 bool CanAnalyze = 2174 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2175 (void)CanAnalyze; 2176 assert(CanAnalyze && "don't call from other places!"); 2177 2178 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2179 SCEV::NoWrapFlags SignOrUnsignWrap = 2180 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2181 2182 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2183 auto IsKnownNonNegative = [&](const SCEV *S) { 2184 return SE->isKnownNonNegative(S); 2185 }; 2186 2187 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2188 Flags = 2189 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2190 2191 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2192 2193 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2194 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2195 2196 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2197 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2198 2199 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2200 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2201 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2202 Instruction::Add, C, OBO::NoSignedWrap); 2203 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2204 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2205 } 2206 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2207 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2208 Instruction::Add, C, OBO::NoUnsignedWrap); 2209 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2210 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2211 } 2212 } 2213 2214 return Flags; 2215 } 2216 2217 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2218 if (!isLoopInvariant(S, L)) 2219 return false; 2220 // If a value depends on a SCEVUnknown which is defined after the loop, we 2221 // conservatively assume that we cannot calculate it at the loop's entry.
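// For instance (a sketch): a SCEVUnknown wrapping a load placed in a block // dominated by the loop header may well be loop-invariant, yet its value // does not exist at the preheader, so an expression built from it cannot be // evaluated at the loop's entry.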
2222 struct FindDominatedSCEVUnknown { 2223 bool Found = false; 2224 const Loop *L; 2225 DominatorTree &DT; 2226 LoopInfo &LI; 2227 2228 FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI) 2229 : L(L), DT(DT), LI(LI) {} 2230 2231 bool checkSCEVUnknown(const SCEVUnknown *SU) { 2232 if (auto *I = dyn_cast<Instruction>(SU->getValue())) { 2233 if (DT.dominates(L->getHeader(), I->getParent())) 2234 Found = true; 2235 else 2236 assert(DT.dominates(I->getParent(), L->getHeader()) && 2237 "No dominance relationship between SCEV and loop?"); 2238 } 2239 return false; 2240 } 2241 2242 bool follow(const SCEV *S) { 2243 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 2244 case scConstant: 2245 return false; 2246 case scAddRecExpr: 2247 case scTruncate: 2248 case scZeroExtend: 2249 case scSignExtend: 2250 case scAddExpr: 2251 case scMulExpr: 2252 case scUMaxExpr: 2253 case scSMaxExpr: 2254 case scUDivExpr: 2255 return true; 2256 case scUnknown: 2257 return checkSCEVUnknown(cast<SCEVUnknown>(S)); 2258 case scCouldNotCompute: 2259 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 2260 } 2261 return false; 2262 } 2263 2264 bool isDone() { return Found; } 2265 }; 2266 2267 FindDominatedSCEVUnknown FSU(L, DT, LI); 2268 SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU); 2269 ST.visitAll(S); 2270 return !FSU.Found; 2271 } 2272 2273 /// Get a canonical add expression, or something simpler if possible. 2274 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2275 SCEV::NoWrapFlags Flags, 2276 unsigned Depth) { 2277 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2278 "only nuw or nsw allowed"); 2279 assert(!Ops.empty() && "Cannot get empty add!"); 2280 if (Ops.size() == 1) return Ops[0]; 2281 #ifndef NDEBUG 2282 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2283 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2284 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2285 "SCEVAddExpr operand types don't match!"); 2286 #endif 2287 2288 // Sort by complexity; this groups all similar expression types together. 2289 GroupByComplexity(Ops, &LI, DT); 2290 2291 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2292 2293 // If there are any constants, fold them together. 2294 unsigned Idx = 0; 2295 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2296 ++Idx; 2297 assert(Idx < Ops.size()); 2298 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2299 // We found two constants, fold them together! 2300 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2301 if (Ops.size() == 2) return Ops[0]; 2302 Ops.erase(Ops.begin()+1); // Erase the folded element 2303 LHSC = cast<SCEVConstant>(Ops[0]); 2304 } 2305 2306 // If we are left with a constant zero being added, strip it off. 2307 if (LHSC->getValue()->isZero()) { 2308 Ops.erase(Ops.begin()); 2309 --Idx; 2310 } 2311 2312 if (Ops.size() == 1) return Ops[0]; 2313 } 2314 2315 // Limit the depth of recursive calls. 2316 if (Depth > MaxArithDepth) 2317 return getOrCreateAddExpr(Ops, Flags); 2318 2319 // Okay, check to see if the same value occurs in the operand list more than 2320 // once. If so, merge them together into a multiply expression. Since we 2321 // sorted the list, these values are required to be adjacent. 2322 Type *Ty = Ops[0]->getType(); 2323 bool FoundMatch = false; 2324 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2325 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2326 // Scan ahead to count how many equal operands there are.
2327 unsigned Count = 2; 2328 while (i+Count != e && Ops[i+Count] == Ops[i]) 2329 ++Count; 2330 // Merge the values into a multiply. 2331 const SCEV *Scale = getConstant(Ty, Count); 2332 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2333 if (Ops.size() == Count) 2334 return Mul; 2335 Ops[i] = Mul; 2336 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2337 --i; e -= Count - 1; 2338 FoundMatch = true; 2339 } 2340 if (FoundMatch) 2341 return getAddExpr(Ops, Flags); 2342 2343 // Check for truncates. If all the operands are truncated from the same 2344 // type, see if factoring out the truncate would permit the result to be 2345 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y) 2346 // if the contents of the resulting outer trunc fold to something simple. 2347 auto FindTruncSrcType = [&]() -> Type * { 2348 // We're ultimately looking to fold an addrec of truncs and muls of only 2349 // constants and truncs, so if we find any other types of SCEV 2350 // as operands of the addrec then we bail and return nullptr here. 2351 // Otherwise, we return the type of the operand of a trunc that we find. 2352 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2353 return T->getOperand()->getType(); 2354 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2355 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2356 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2357 return T->getOperand()->getType(); 2358 } 2359 return nullptr; 2360 }; 2361 if (auto *SrcType = FindTruncSrcType()) { 2362 SmallVector<const SCEV *, 8> LargeOps; 2363 bool Ok = true; 2364 // Check all the operands to see if they can be represented in the 2365 // source type of the truncate. 2366 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2367 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2368 if (T->getOperand()->getType() != SrcType) { 2369 Ok = false; 2370 break; 2371 } 2372 LargeOps.push_back(T->getOperand()); 2373 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2374 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2375 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2376 SmallVector<const SCEV *, 8> LargeMulOps; 2377 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2378 if (const SCEVTruncateExpr *T = 2379 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2380 if (T->getOperand()->getType() != SrcType) { 2381 Ok = false; 2382 break; 2383 } 2384 LargeMulOps.push_back(T->getOperand()); 2385 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2386 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2387 } else { 2388 Ok = false; 2389 break; 2390 } 2391 } 2392 if (Ok) 2393 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2394 } else { 2395 Ok = false; 2396 break; 2397 } 2398 } 2399 if (Ok) { 2400 // Evaluate the expression in the larger type. 2401 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); 2402 // If it folds to something simple, use it. Otherwise, don't. 2403 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2404 return getTruncateExpr(Fold, Ty); 2405 } 2406 } 2407 2408 // Skip past any other cast SCEVs. 2409 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2410 ++Idx; 2411 2412 // If there are add operands, they would be next.
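// For example, given (a + (b + c)) the inner SCEVAddExpr is erased and its // operands b and c are appended to Ops, flattening the expression into a // single three-operand add before any further simplification.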
2413 if (Idx < Ops.size()) { 2414 bool DeletedAdd = false; 2415 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2416 if (Ops.size() > AddOpsInlineThreshold || 2417 Add->getNumOperands() > AddOpsInlineThreshold) 2418 break; 2419 // If we have an add, expand the add operands onto the end of the operands 2420 // list. 2421 Ops.erase(Ops.begin()+Idx); 2422 Ops.append(Add->op_begin(), Add->op_end()); 2423 DeletedAdd = true; 2424 } 2425 2426 // If we deleted at least one add, we added operands to the end of the list, 2427 // and they are not necessarily sorted. Recurse to re-sort and resimplify 2428 // any operands we just acquired. 2429 if (DeletedAdd) 2430 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2431 } 2432 2433 // Skip over the add expression until we get to a multiply. 2434 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2435 ++Idx; 2436 2437 // Check to see if there are any folding opportunities present with 2438 // operands multiplied by constant values. 2439 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2440 uint64_t BitWidth = getTypeSizeInBits(Ty); 2441 DenseMap<const SCEV *, APInt> M; 2442 SmallVector<const SCEV *, 8> NewOps; 2443 APInt AccumulatedConstant(BitWidth, 0); 2444 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2445 Ops.data(), Ops.size(), 2446 APInt(BitWidth, 1), *this)) { 2447 struct APIntCompare { 2448 bool operator()(const APInt &LHS, const APInt &RHS) const { 2449 return LHS.ult(RHS); 2450 } 2451 }; 2452 2453 // Some interesting folding opportunity is present, so it's worthwhile to 2454 // re-generate the operands list. Group the operands by constant scale, 2455 // to avoid multiplying by the same constant scale multiple times. 2456 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2457 for (const SCEV *NewOp : NewOps) 2458 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2459 // Re-generate the operands list. 2460 Ops.clear(); 2461 if (AccumulatedConstant != 0) 2462 Ops.push_back(getConstant(AccumulatedConstant)); 2463 for (auto &MulOp : MulOpLists) 2464 if (MulOp.first != 0) 2465 Ops.push_back(getMulExpr( 2466 getConstant(MulOp.first), 2467 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2468 SCEV::FlagAnyWrap, Depth + 1)); 2469 if (Ops.empty()) 2470 return getZero(Ty); 2471 if (Ops.size() == 1) 2472 return Ops[0]; 2473 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2474 } 2475 } 2476 2477 // If we are adding something to a multiply expression, make sure the 2478 // something is not already an operand of the multiply. If so, merge it into 2479 // the multiply. 2480 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2481 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2482 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2483 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2484 if (isa<SCEVConstant>(MulOpSCEV)) 2485 continue; 2486 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2487 if (MulOpSCEV == Ops[AddOp]) { 2488 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2489 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2490 if (Mul->getNumOperands() != 2) { 2491 // If the multiply has more than two operands, we must get the 2492 // Y*Z term.
2493 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2494 Mul->op_begin()+MulOp); 2495 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2496 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2497 } 2498 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2499 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2500 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2501 SCEV::FlagAnyWrap, Depth + 1); 2502 if (Ops.size() == 2) return OuterMul; 2503 if (AddOp < Idx) { 2504 Ops.erase(Ops.begin()+AddOp); 2505 Ops.erase(Ops.begin()+Idx-1); 2506 } else { 2507 Ops.erase(Ops.begin()+Idx); 2508 Ops.erase(Ops.begin()+AddOp-1); 2509 } 2510 Ops.push_back(OuterMul); 2511 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2512 } 2513 2514 // Check this multiply against other multiplies being added together. 2515 for (unsigned OtherMulIdx = Idx+1; 2516 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2517 ++OtherMulIdx) { 2518 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2519 // If MulOp occurs in OtherMul, we can fold the two multiplies 2520 // together. 2521 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2522 OMulOp != e; ++OMulOp) 2523 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2524 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2525 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2526 if (Mul->getNumOperands() != 2) { 2527 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2528 Mul->op_begin()+MulOp); 2529 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2530 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2531 } 2532 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2533 if (OtherMul->getNumOperands() != 2) { 2534 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2535 OtherMul->op_begin()+OMulOp); 2536 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2537 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2538 } 2539 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2540 const SCEV *InnerMulSum = 2541 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2542 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2543 SCEV::FlagAnyWrap, Depth + 1); 2544 if (Ops.size() == 2) return OuterMul; 2545 Ops.erase(Ops.begin()+Idx); 2546 Ops.erase(Ops.begin()+OtherMulIdx-1); 2547 Ops.push_back(OuterMul); 2548 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2549 } 2550 } 2551 } 2552 } 2553 2554 // If there are any add recurrences in the operands list, see if any other 2555 // added values are loop invariant. If so, we can fold them into the 2556 // recurrence. 2557 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2558 ++Idx; 2559 2560 // Scan over all recurrences, trying to fold loop invariants into them. 2561 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2562 // Scan all of the other operands to this add and add them to the vector if 2563 // they are loop invariant w.r.t. the recurrence. 2564 SmallVector<const SCEV *, 8> LIOps; 2565 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2566 const Loop *AddRecLoop = AddRec->getLoop(); 2567 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2568 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2569 LIOps.push_back(Ops[i]); 2570 Ops.erase(Ops.begin()+i); 2571 --i; --e; 2572 } 2573 2574 // If we found some loop invariants, fold them into the recurrence. 
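// For example, p + q + {5,+,1}<L> with p and q invariant in L folds to the // single recurrence {p+q+5,+,1}<L>; a non-invariant term r would stay // outside, giving r + {p+q+5,+,1}<L>.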
2575 if (!LIOps.empty()) { 2576 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2577 LIOps.push_back(AddRec->getStart()); 2578 2579 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2580 AddRec->op_end()); 2581 // This follows from the fact that the no-wrap flags on the outer add 2582 // expression are applicable on the 0th iteration, when the add recurrence 2583 // will be equal to its start value. 2584 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2585 2586 // Build the new addrec. Propagate the NUW and NSW flags if both the 2587 // outer add and the inner addrec are guaranteed to have no overflow. 2588 // Always propagate NW. 2589 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2590 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2591 2592 // If all of the other operands were loop invariant, we are done. 2593 if (Ops.size() == 1) return NewRec; 2594 2595 // Otherwise, add the folded AddRec to the non-invariant parts. 2596 for (unsigned i = 0;; ++i) 2597 if (Ops[i] == AddRec) { 2598 Ops[i] = NewRec; 2599 break; 2600 } 2601 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2602 } 2603 2604 // Okay, if there weren't any loop invariants to be folded, check to see if 2605 // there are multiple AddRec's with the same loop induction variable being 2606 // added together. If so, we can fold them. 2607 for (unsigned OtherIdx = Idx+1; 2608 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2609 ++OtherIdx) { 2610 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2611 // so that the 1st found AddRecExpr is dominated by all others. 2612 assert(DT.dominates( 2613 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2614 AddRec->getLoop()->getHeader()) && 2615 "AddRecExprs are not sorted in reverse dominance order?"); 2616 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2617 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2618 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2619 AddRec->op_end()); 2620 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2621 ++OtherIdx) { 2622 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2623 if (OtherAddRec->getLoop() == AddRecLoop) { 2624 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2625 i != e; ++i) { 2626 if (i >= AddRecOps.size()) { 2627 AddRecOps.append(OtherAddRec->op_begin()+i, 2628 OtherAddRec->op_end()); 2629 break; 2630 } 2631 SmallVector<const SCEV *, 2> TwoOps = { 2632 AddRecOps[i], OtherAddRec->getOperand(i)}; 2633 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2634 } 2635 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2636 } 2637 } 2638 // Step size has changed, so we cannot guarantee no self-wraparound. 2639 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2640 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2641 } 2642 } 2643 2644 // Otherwise couldn't fold anything into this recurrence. Move on to the 2645 // next one. 2646 } 2647 2648 // Okay, it looks like we really DO need an add expr. Check to see if we 2649 // already have one; otherwise, create a new one.
2650 return getOrCreateAddExpr(Ops, Flags); 2651 } 2652 2653 const SCEV * 2654 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2655 SCEV::NoWrapFlags Flags) { 2656 FoldingSetNodeID ID; 2657 ID.AddInteger(scAddExpr); 2658 for (const SCEV *Op : Ops) 2659 ID.AddPointer(Op); 2660 void *IP = nullptr; 2661 SCEVAddExpr *S = 2662 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2663 if (!S) { 2664 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2665 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2666 S = new (SCEVAllocator) 2667 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2668 UniqueSCEVs.InsertNode(S, IP); 2669 addToLoopUseLists(S); 2670 } 2671 S->setNoWrapFlags(Flags); 2672 return S; 2673 } 2674 2675 const SCEV * 2676 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2677 SCEV::NoWrapFlags Flags) { 2678 FoldingSetNodeID ID; 2679 ID.AddInteger(scMulExpr); 2680 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2681 ID.AddPointer(Ops[i]); 2682 void *IP = nullptr; 2683 SCEVMulExpr *S = 2684 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2685 if (!S) { 2686 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2687 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2688 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2689 O, Ops.size()); 2690 UniqueSCEVs.InsertNode(S, IP); 2691 addToLoopUseLists(S); 2692 } 2693 S->setNoWrapFlags(Flags); 2694 return S; 2695 } 2696 2697 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2698 uint64_t k = i*j; 2699 if (j > 1 && k / j != i) Overflow = true; 2700 return k; 2701 } 2702 2703 /// Compute the result of "n choose k", the binomial coefficient. If an 2704 /// intermediate computation overflows, Overflow will be set and the return will 2705 /// be garbage. Overflow is not cleared in the absence of overflow. 2706 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2707 // We use the multiplicative formula: 2708 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2709 // At each iteration, we take the n-th term of the numerator and divide by the 2710 // (k-n)th term of the denominator. This division will always produce an 2711 // integral result, and helps reduce the chance of overflow in the 2712 // intermediate computations. However, we can still overflow even when the 2713 // final result would fit. 2714 2715 if (n == 0 || n == k) return 1; 2716 if (k > n) return 0; 2717 2718 if (k > n/2) 2719 k = n-k; 2720 2721 uint64_t r = 1; 2722 for (uint64_t i = 1; i <= k; ++i) { 2723 r = umul_ov(r, n-(i-1), Overflow); 2724 r /= i; 2725 } 2726 return r; 2727 } 2728 2729 /// Determine if any of the operands in this SCEV are a constant or if 2730 /// any of the add or multiply expressions in this SCEV contain a constant. 2731 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2732 struct FindConstantInAddMulChain { 2733 bool FoundConstant = false; 2734 2735 bool follow(const SCEV *S) { 2736 FoundConstant |= isa<SCEVConstant>(S); 2737 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2738 } 2739 2740 bool isDone() const { 2741 return FoundConstant; 2742 } 2743 }; 2744 2745 FindConstantInAddMulChain F; 2746 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2747 ST.visitAll(StartExpr); 2748 return F.FoundConstant; 2749 } 2750 2751 /// Get a canonical multiply expression, or something simpler if possible.
2752 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2753 SCEV::NoWrapFlags Flags, 2754 unsigned Depth) { 2755 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2756 "only nuw or nsw allowed"); 2757 assert(!Ops.empty() && "Cannot get empty mul!"); 2758 if (Ops.size() == 1) return Ops[0]; 2759 #ifndef NDEBUG 2760 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2761 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2762 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2763 "SCEVMulExpr operand types don't match!"); 2764 #endif 2765 2766 // Sort by complexity; this groups all similar expression types together. 2767 GroupByComplexity(Ops, &LI, DT); 2768 2769 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2770 2771 // Limit the depth of recursive calls. 2772 if (Depth > MaxArithDepth) 2773 return getOrCreateMulExpr(Ops, Flags); 2774 2775 // If there are any constants, fold them together. 2776 unsigned Idx = 0; 2777 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2778 2779 // C1*(C2+V) -> C1*C2 + C1*V 2780 if (Ops.size() == 2) 2781 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2782 // If any of Add's ops are Adds or Muls with a constant, 2783 // apply this transformation as well. 2784 if (Add->getNumOperands() == 2) 2785 // TODO: There are some cases where this transformation is not 2786 // profitable, for example: 2787 // Add = (C0 + X) * Y + Z. 2788 // Maybe the scope of this transformation should be narrowed down. 2789 if (containsConstantInAddMulChain(Add)) 2790 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2791 SCEV::FlagAnyWrap, Depth + 1), 2792 getMulExpr(LHSC, Add->getOperand(1), 2793 SCEV::FlagAnyWrap, Depth + 1), 2794 SCEV::FlagAnyWrap, Depth + 1); 2795 2796 ++Idx; 2797 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2798 // We found two constants, fold them together! 2799 ConstantInt *Fold = 2800 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2801 Ops[0] = getConstant(Fold); 2802 Ops.erase(Ops.begin()+1); // Erase the folded element 2803 if (Ops.size() == 1) return Ops[0]; 2804 LHSC = cast<SCEVConstant>(Ops[0]); 2805 } 2806 2807 // If we are left with a constant one being multiplied, strip it off. 2808 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2809 Ops.erase(Ops.begin()); 2810 --Idx; 2811 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2812 // If we have a multiply of zero, it will always be zero. 2813 return Ops[0]; 2814 } else if (Ops[0]->isAllOnesValue()) { 2815 // If we have a mul by -1 of an add, try distributing the -1 among the 2816 // add operands. 2817 if (Ops.size() == 2) { 2818 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2819 SmallVector<const SCEV *, 4> NewOps; 2820 bool AnyFolded = false; 2821 for (const SCEV *AddOp : Add->operands()) { 2822 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2823 Depth + 1); 2824 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2825 NewOps.push_back(Mul); 2826 } 2827 if (AnyFolded) 2828 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2829 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2830 // Negation preserves a recurrence's no self-wrap property.
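// (Sketch of why: (-1) * {S,+,X} steps through the pointwise negations of // the original sequence, so the negated recurrence revisits a value only if // the original one did, and <nw> carries over.)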
2831 SmallVector<const SCEV *, 4> Operands; 2832 for (const SCEV *AddRecOp : AddRec->operands()) 2833 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2834 Depth + 1)); 2835 2836 return getAddRecExpr(Operands, AddRec->getLoop(), 2837 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2838 } 2839 } 2840 } 2841 2842 if (Ops.size() == 1) 2843 return Ops[0]; 2844 } 2845 2846 // Skip over the add expression until we get to a multiply. 2847 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2848 ++Idx; 2849 2850 // If there are mul operands, inline them all into this expression. 2851 if (Idx < Ops.size()) { 2852 bool DeletedMul = false; 2853 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2854 if (Ops.size() > MulOpsInlineThreshold) 2855 break; 2856 // If we have a mul, expand the mul operands onto the end of the 2857 // operands list. 2858 Ops.erase(Ops.begin()+Idx); 2859 Ops.append(Mul->op_begin(), Mul->op_end()); 2860 DeletedMul = true; 2861 } 2862 2863 // If we deleted at least one mul, we added operands to the end of the 2864 // list, and they are not necessarily sorted. Recurse to re-sort and 2865 // resimplify any operands we just acquired. 2866 if (DeletedMul) 2867 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2868 } 2869 2870 // If there are any add recurrences in the operands list, see if any other 2871 // added values are loop invariant. If so, we can fold them into the 2872 // recurrence. 2873 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2874 ++Idx; 2875 2876 // Scan over all recurrences, trying to fold loop invariants into them. 2877 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2878 // Scan all of the other operands to this mul and add them to the vector 2879 // if they are loop invariant w.r.t. the recurrence. 2880 SmallVector<const SCEV *, 8> LIOps; 2881 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2882 const Loop *AddRecLoop = AddRec->getLoop(); 2883 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2884 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2885 LIOps.push_back(Ops[i]); 2886 Ops.erase(Ops.begin()+i); 2887 --i; --e; 2888 } 2889 2890 // If we found some loop invariants, fold them into the recurrence. 2891 if (!LIOps.empty()) { 2892 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2893 SmallVector<const SCEV *, 4> NewOps; 2894 NewOps.reserve(AddRec->getNumOperands()); 2895 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2896 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2897 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2898 SCEV::FlagAnyWrap, Depth + 1)); 2899 2900 // Build the new addrec. Propagate the NUW and NSW flags if both the 2901 // outer mul and the inner addrec are guaranteed to have no overflow. 2902 // 2903 // The no-self-wrap flag cannot be guaranteed after changing the step 2904 // size, but it will be inferred if either NUW or NSW is true. 2905 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2906 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2907 2908 // If all of the other operands were loop invariant, we are done. 2909 if (Ops.size() == 1) return NewRec; 2910 2911 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2912 for (unsigned i = 0;; ++i) 2913 if (Ops[i] == AddRec) { 2914 Ops[i] = NewRec; 2915 break; 2916 } 2917 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2918 } 2919 2920 // Okay, if there weren't any loop invariants to be folded, check to see 2921 // if there are multiple AddRec's with the same loop induction variable 2922 // being multiplied together. If so, we can fold them. 2923 2924 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2925 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2926 // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z 2927 // ]]],+,...up to x=2n}. 2928 // Note that the arguments to choose() are always integers with values 2929 // known at compile time, never SCEV objects. 2930 // 2931 // The implementation avoids pointless extra computations when the two 2932 // addrec's are of different length (mathematically, it's equivalent to 2933 // an infinite stream of zeros on the right). 2934 bool OpsModified = false; 2935 for (unsigned OtherIdx = Idx+1; 2936 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2937 ++OtherIdx) { 2938 const SCEVAddRecExpr *OtherAddRec = 2939 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2940 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2941 continue; 2942 2943 // Limit max number of arguments to avoid creation of unreasonably big 2944 // SCEVAddRecs with very complex operands. 2945 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 2946 MaxAddRecSize) 2947 continue; 2948 2949 bool Overflow = false; 2950 Type *Ty = AddRec->getType(); 2951 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2952 SmallVector<const SCEV*, 7> AddRecOps; 2953 for (int x = 0, xe = AddRec->getNumOperands() + 2954 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2955 const SCEV *Term = getZero(Ty); 2956 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2957 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2958 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2959 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2960 z < ze && !Overflow; ++z) { 2961 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2962 uint64_t Coeff; 2963 if (LargerThan64Bits) 2964 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2965 else 2966 Coeff = Coeff1*Coeff2; 2967 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2968 const SCEV *Term1 = AddRec->getOperand(y-z); 2969 const SCEV *Term2 = OtherAddRec->getOperand(z); 2970 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2, 2971 SCEV::FlagAnyWrap, Depth + 1), 2972 SCEV::FlagAnyWrap, Depth + 1); 2973 } 2974 } 2975 AddRecOps.push_back(Term); 2976 } 2977 if (!Overflow) { 2978 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), 2979 SCEV::FlagAnyWrap); 2980 if (Ops.size() == 2) return NewAddRec; 2981 Ops[Idx] = NewAddRec; 2982 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2983 OpsModified = true; 2984 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2985 if (!AddRec) 2986 break; 2987 } 2988 } 2989 if (OpsModified) 2990 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2991 2992 // Otherwise couldn't fold anything into this recurrence. Move on to the 2993 // next one. 2994 } 2995 2996 // Okay, it looks like we really DO need a mul expr. Check to see if we 2997 // already have one; otherwise, create a new one. 2998 return getOrCreateMulExpr(Ops, Flags); 2999 } 3000 3001 /// Represents an unsigned remainder expression based on unsigned division.
3002 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, 3003 const SCEV *RHS) { 3004 assert(getEffectiveSCEVType(LHS->getType()) == 3005 getEffectiveSCEVType(RHS->getType()) && 3006 "SCEVURemExpr operand types don't match!"); 3007 3008 // Short-circuit easy cases. 3009 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3010 // If constant is one, the result is trivial. 3011 if (RHSC->getValue()->isOne()) 3012 return getZero(LHS->getType()); // X urem 1 --> 0 3013 3014 // If constant is a power of two, fold into a zext(trunc(LHS)). 3015 if (RHSC->getAPInt().isPowerOf2()) { 3016 Type *FullTy = LHS->getType(); 3017 Type *TruncTy = 3018 IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); 3019 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); 3020 } 3021 } 3022 3023 // Fall back to the general rewrite: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) 3024 const SCEV *UDiv = getUDivExpr(LHS, RHS); 3025 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); 3026 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); 3027 } 3028 3029 /// Get a canonical unsigned division expression, or something simpler if 3030 /// possible. 3031 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 3032 const SCEV *RHS) { 3033 assert(getEffectiveSCEVType(LHS->getType()) == 3034 getEffectiveSCEVType(RHS->getType()) && 3035 "SCEVUDivExpr operand types don't match!"); 3036 3037 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3038 if (RHSC->getValue()->isOne()) 3039 return LHS; // X udiv 1 --> x 3040 // If the denominator is zero, the result of the udiv is undefined. Don't 3041 // try to analyze it, because the resolution chosen here may differ from 3042 // the resolution chosen in other parts of the compiler. 3043 if (!RHSC->getValue()->isZero()) { 3044 // Determine if the division can be folded into the operands of its 3045 // left-hand side. 3046 // TODO: Generalize this to non-constants by using known-bits information. 3047 Type *Ty = LHS->getType(); 3048 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 3049 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 3050 // For non-power-of-two values, effectively round the value up to the 3051 // nearest power of two. 3052 if (!RHSC->getAPInt().isPowerOf2()) 3053 ++MaxShiftAmt; 3054 IntegerType *ExtTy = 3055 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 3056 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 3057 if (const SCEVConstant *Step = 3058 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 3059 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 3060 const APInt &StepInt = Step->getAPInt(); 3061 const APInt &DivInt = RHSC->getAPInt(); 3062 if (!StepInt.urem(DivInt) && 3063 getZeroExtendExpr(AR, ExtTy) == 3064 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3065 getZeroExtendExpr(Step, ExtTy), 3066 AR->getLoop(), SCEV::FlagAnyWrap)) { 3067 SmallVector<const SCEV *, 4> Operands; 3068 for (const SCEV *Op : AR->operands()) 3069 Operands.push_back(getUDivExpr(Op, RHS)); 3070 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 3071 } 3072 // Get a canonical UDivExpr for a recurrence: 3073 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 3074 // We can currently only fold X%N if X is constant.
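// (A hypothetical instance: {7,+,4}/u 8 == {4,+,4}/u 8, both yielding the // quotients 0, 1, 1, 2, ..., because 8 % 4 == 0 and 7 - (7 % 4) == 4; given // the overflow check above, rounding the start down to a multiple of the // step never changes the quotient.)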
3075 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3076 if (StartC && !DivInt.urem(StepInt) && 3077 getZeroExtendExpr(AR, ExtTy) == 3078 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3079 getZeroExtendExpr(Step, ExtTy), 3080 AR->getLoop(), SCEV::FlagAnyWrap)) { 3081 const APInt &StartInt = StartC->getAPInt(); 3082 const APInt &StartRem = StartInt.urem(StepInt); 3083 if (StartRem != 0) 3084 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3085 AR->getLoop(), SCEV::FlagNW); 3086 } 3087 } 3088 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3089 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3090 SmallVector<const SCEV *, 4> Operands; 3091 for (const SCEV *Op : M->operands()) 3092 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3093 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3094 // Find an operand that's safely divisible. 3095 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3096 const SCEV *Op = M->getOperand(i); 3097 const SCEV *Div = getUDivExpr(Op, RHSC); 3098 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3099 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3100 M->op_end()); 3101 Operands[i] = Div; 3102 return getMulExpr(Operands); 3103 } 3104 } 3105 } 3106 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3107 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3108 SmallVector<const SCEV *, 4> Operands; 3109 for (const SCEV *Op : A->operands()) 3110 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3111 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3112 Operands.clear(); 3113 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3114 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3115 if (isa<SCEVUDivExpr>(Op) || 3116 getMulExpr(Op, RHS) != A->getOperand(i)) 3117 break; 3118 Operands.push_back(Op); 3119 } 3120 if (Operands.size() == A->getNumOperands()) 3121 return getAddExpr(Operands); 3122 } 3123 } 3124 3125 // Fold if both operands are constant. 3126 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3127 Constant *LHSCV = LHSC->getValue(); 3128 Constant *RHSCV = RHSC->getValue(); 3129 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3130 RHSCV))); 3131 } 3132 } 3133 } 3134 3135 FoldingSetNodeID ID; 3136 ID.AddInteger(scUDivExpr); 3137 ID.AddPointer(LHS); 3138 ID.AddPointer(RHS); 3139 void *IP = nullptr; 3140 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3141 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3142 LHS, RHS); 3143 UniqueSCEVs.InsertNode(S, IP); 3144 addToLoopUseLists(S); 3145 return S; 3146 } 3147 3148 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3149 APInt A = C1->getAPInt().abs(); 3150 APInt B = C2->getAPInt().abs(); 3151 uint32_t ABW = A.getBitWidth(); 3152 uint32_t BBW = B.getBitWidth(); 3153 3154 if (ABW > BBW) 3155 B = B.zext(ABW); 3156 else if (ABW < BBW) 3157 A = A.zext(BBW); 3158 3159 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3160 } 3161 3162 /// Get a canonical unsigned division expression, or something simpler if 3163 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3164 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3165 /// it's not exact because the udiv may be clearing bits. 
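/// For example, an exact ((%a * %b)<nuw> /u %b) simplifies to %a, and an
/// exact ((6 * %x)<nuw> /u 3) simplifies to (2 * %x) after cancelling the
/// common factor of 3.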
3166 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3167 const SCEV *RHS) { 3168 // TODO: we could try to find factors in all sorts of things, but for now we 3169 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3170 // end of this file for inspiration. 3171 3172 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3173 if (!Mul || !Mul->hasNoUnsignedWrap()) 3174 return getUDivExpr(LHS, RHS); 3175 3176 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3177 // If the mulexpr multiplies by a constant, then that constant must be the 3178 // first element of the mulexpr. 3179 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3180 if (LHSCst == RHSCst) { 3181 SmallVector<const SCEV *, 2> Operands; 3182 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3183 return getMulExpr(Operands); 3184 } 3185 3186 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3187 // that there's a factor provided by one of the other terms. We need to 3188 // check. 3189 APInt Factor = gcd(LHSCst, RHSCst); 3190 if (!Factor.isIntN(1)) { 3191 LHSCst = 3192 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3193 RHSCst = 3194 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3195 SmallVector<const SCEV *, 2> Operands; 3196 Operands.push_back(LHSCst); 3197 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3198 LHS = getMulExpr(Operands); 3199 RHS = RHSCst; 3200 Mul = dyn_cast<SCEVMulExpr>(LHS); 3201 if (!Mul) 3202 return getUDivExactExpr(LHS, RHS); 3203 } 3204 } 3205 } 3206 3207 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3208 if (Mul->getOperand(i) == RHS) { 3209 SmallVector<const SCEV *, 2> Operands; 3210 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3211 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3212 return getMulExpr(Operands); 3213 } 3214 } 3215 3216 return getUDivExpr(LHS, RHS); 3217 } 3218 3219 /// Get an add recurrence expression for the specified loop. Simplify the 3220 /// expression as much as possible. 3221 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3222 const Loop *L, 3223 SCEV::NoWrapFlags Flags) { 3224 SmallVector<const SCEV *, 4> Operands; 3225 Operands.push_back(Start); 3226 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3227 if (StepChrec->getLoop() == L) { 3228 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3229 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3230 } 3231 3232 Operands.push_back(Step); 3233 return getAddRecExpr(Operands, L, Flags); 3234 } 3235 3236 /// Get an add recurrence expression for the specified loop. Simplify the 3237 /// expression as much as possible. 
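/// For example, nested recurrences are canonicalized by loop depth, so
/// {{a,+,b}<Inner>,+,c}<Outer> becomes {{a,+,c}<Outer>,+,b}<Inner>.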
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
3311 FoldingSetNodeID ID; 3312 ID.AddInteger(scAddRecExpr); 3313 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3314 ID.AddPointer(Operands[i]); 3315 ID.AddPointer(L); 3316 void *IP = nullptr; 3317 SCEVAddRecExpr *S = 3318 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3319 if (!S) { 3320 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3321 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3322 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3323 O, Operands.size(), L); 3324 UniqueSCEVs.InsertNode(S, IP); 3325 addToLoopUseLists(S); 3326 } 3327 S->setNoWrapFlags(Flags); 3328 return S; 3329 } 3330 3331 const SCEV * 3332 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3333 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3334 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3335 // getSCEV(Base)->getType() has the same address space as Base->getType() 3336 // because SCEV::getType() preserves the address space. 3337 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3338 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3339 // instruction to its SCEV, because the Instruction may be guarded by control 3340 // flow and the no-overflow bits may not be valid for the expression in any 3341 // context. This can be fixed similarly to how these flags are handled for 3342 // adds. 3343 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3344 : SCEV::FlagAnyWrap; 3345 3346 const SCEV *TotalOffset = getZero(IntPtrTy); 3347 // The array size is unimportant. The first thing we do on CurTy is getting 3348 // its element type. 3349 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3350 for (const SCEV *IndexExpr : IndexExprs) { 3351 // Compute the (potentially symbolic) offset in bytes for this index. 3352 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3353 // For a struct, add the member offset. 3354 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3355 unsigned FieldNo = Index->getZExtValue(); 3356 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3357 3358 // Add the field offset to the running total offset. 3359 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3360 3361 // Update CurTy to the type of the field at Index. 3362 CurTy = STy->getTypeAtIndex(Index); 3363 } else { 3364 // Update CurTy to its element type. 3365 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3366 // For an array, add the element offset, explicitly scaled. 3367 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3368 // Getelementptr indices are signed. 3369 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3370 3371 // Multiply the index by the element size to compute the element offset. 3372 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3373 3374 // Add the element offset to the running total offset. 3375 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3376 } 3377 } 3378 3379 // Add the total offset from all the GEP indices to the base. 
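  // For instance, assuming 64-bit pointers and a 4-byte i32, the GEP
  //   getelementptr inbounds [10 x i32], [10 x i32]* %p, i64 1, i64 %i
  // contributes 40 bytes (one whole [10 x i32] element) for the first index
  // and (4 * %i) bytes for the second, giving roughly ((40 + (4 * %i)) + %p).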
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X smax Y smax Y  -->  X smax Y
    // X smax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
3470 FoldingSetNodeID ID; 3471 ID.AddInteger(scSMaxExpr); 3472 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3473 ID.AddPointer(Ops[i]); 3474 void *IP = nullptr; 3475 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3476 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3477 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3478 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3479 O, Ops.size()); 3480 UniqueSCEVs.InsertNode(S, IP); 3481 addToLoopUseLists(S); 3482 return S; 3483 } 3484 3485 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3486 const SCEV *RHS) { 3487 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3488 return getUMaxExpr(Ops); 3489 } 3490 3491 const SCEV * 3492 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3493 assert(!Ops.empty() && "Cannot get empty umax!"); 3494 if (Ops.size() == 1) return Ops[0]; 3495 #ifndef NDEBUG 3496 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3497 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3498 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3499 "SCEVUMaxExpr operand types don't match!"); 3500 #endif 3501 3502 // Sort by complexity, this groups all similar expression types together. 3503 GroupByComplexity(Ops, &LI, DT); 3504 3505 // If there are any constants, fold them together. 3506 unsigned Idx = 0; 3507 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3508 ++Idx; 3509 assert(Idx < Ops.size()); 3510 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3511 // We found two constants, fold them together! 3512 ConstantInt *Fold = ConstantInt::get( 3513 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3514 Ops[0] = getConstant(Fold); 3515 Ops.erase(Ops.begin()+1); // Erase the folded element 3516 if (Ops.size() == 1) return Ops[0]; 3517 LHSC = cast<SCEVConstant>(Ops[0]); 3518 } 3519 3520 // If we are left with a constant minimum-int, strip it off. 3521 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3522 Ops.erase(Ops.begin()); 3523 --Idx; 3524 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3525 // If we have an umax with a constant maximum-int, it will always be 3526 // maximum-int. 3527 return Ops[0]; 3528 } 3529 3530 if (Ops.size() == 1) return Ops[0]; 3531 } 3532 3533 // Find the first UMax 3534 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3535 ++Idx; 3536 3537 // Check to see if one of the operands is a UMax. If so, expand its operands 3538 // onto our operand list, and recurse to simplify. 3539 if (Idx < Ops.size()) { 3540 bool DeletedUMax = false; 3541 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3542 Ops.erase(Ops.begin()+Idx); 3543 Ops.append(UMax->op_begin(), UMax->op_end()); 3544 DeletedUMax = true; 3545 } 3546 3547 if (DeletedUMax) 3548 return getUMaxExpr(Ops); 3549 } 3550 3551 // Okay, check to see if the same value occurs in the operand list twice. If 3552 // so, delete one. Since we sorted the list, these values are required to 3553 // be adjacent. 
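  // For example, (%a umax %a) collapses to %a, and if %a is known to be
  // uge %b, then (%a umax %b) folds to just %a.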
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X umax Y umax Y  -->  X umax Y
    // X umax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework.
/// This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately.
/// eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S has been inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
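      // For example, if S is (4 + %x) and V is the instruction computing it,
      // we record %x -> {V, 4}, so the expander can later materialize %x
      // as (V - 4) instead of emitting fresh arithmetic.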
3783 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3784 !isa<GetElementPtrInst>(V)) 3785 ExprValueMap[Stripped].insert({V, Offset}); 3786 } 3787 } 3788 return S; 3789 } 3790 3791 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3792 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3793 3794 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3795 if (I != ValueExprMap.end()) { 3796 const SCEV *S = I->second; 3797 if (checkValidity(S)) 3798 return S; 3799 eraseValueFromMap(V); 3800 forgetMemoizedResults(S); 3801 } 3802 return nullptr; 3803 } 3804 3805 /// Return a SCEV corresponding to -V = -1*V 3806 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3807 SCEV::NoWrapFlags Flags) { 3808 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3809 return getConstant( 3810 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3811 3812 Type *Ty = V->getType(); 3813 Ty = getEffectiveSCEVType(Ty); 3814 return getMulExpr( 3815 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3816 } 3817 3818 /// Return a SCEV corresponding to ~V = -1-V 3819 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3820 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3821 return getConstant( 3822 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3823 3824 Type *Ty = V->getType(); 3825 Ty = getEffectiveSCEVType(Ty); 3826 const SCEV *AllOnes = 3827 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3828 return getMinusSCEV(AllOnes, V); 3829 } 3830 3831 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3832 SCEV::NoWrapFlags Flags, 3833 unsigned Depth) { 3834 // Fast path: X - X --> 0. 3835 if (LHS == RHS) 3836 return getZero(LHS->getType()); 3837 3838 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3839 // makes it so that we cannot make much use of NUW. 3840 auto AddFlags = SCEV::FlagAnyWrap; 3841 const bool RHSIsNotMinSigned = 3842 !getSignedRangeMin(RHS).isMinSignedValue(); 3843 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3844 // Let M be the minimum representable signed value. Then (-1)*RHS 3845 // signed-wraps if and only if RHS is M. That can happen even for 3846 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3847 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3848 // (-1)*RHS, we need to prove that RHS != M. 3849 // 3850 // If LHS is non-negative and we know that LHS - RHS does not 3851 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3852 // either by proving that RHS > M or that LHS >= 0. 3853 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3854 AddFlags = SCEV::FlagNSW; 3855 } 3856 } 3857 3858 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3859 // RHS is NSW and LHS >= 0. 3860 // 3861 // The difficulty here is that the NSW flag may have been proven 3862 // relative to a loop that is to be found in a recurrence in LHS and 3863 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3864 // larger scope than intended. 3865 auto NegFlags = RHSIsNotMinSigned ? 
                      SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV
*PromotedRHS = RHS; 3953 3954 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3955 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3956 else 3957 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3958 3959 return getUMaxExpr(PromotedLHS, PromotedRHS); 3960 } 3961 3962 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 3963 const SCEV *RHS) { 3964 const SCEV *PromotedLHS = LHS; 3965 const SCEV *PromotedRHS = RHS; 3966 3967 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3968 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3969 else 3970 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3971 3972 return getUMinExpr(PromotedLHS, PromotedRHS); 3973 } 3974 3975 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 3976 // A pointer operand may evaluate to a nonpointer expression, such as null. 3977 if (!V->getType()->isPointerTy()) 3978 return V; 3979 3980 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 3981 return getPointerBase(Cast->getOperand()); 3982 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 3983 const SCEV *PtrOp = nullptr; 3984 for (const SCEV *NAryOp : NAry->operands()) { 3985 if (NAryOp->getType()->isPointerTy()) { 3986 // Cannot find the base of an expression with multiple pointer operands. 3987 if (PtrOp) 3988 return V; 3989 PtrOp = NAryOp; 3990 } 3991 } 3992 if (!PtrOp) 3993 return V; 3994 return getPointerBase(PtrOp); 3995 } 3996 return V; 3997 } 3998 3999 /// Push users of the given Instruction onto the given Worklist. 4000 static void 4001 PushDefUseChildren(Instruction *I, 4002 SmallVectorImpl<Instruction *> &Worklist) { 4003 // Push the def-use children onto the Worklist stack. 4004 for (User *U : I->users()) 4005 Worklist.push_back(cast<Instruction>(U)); 4006 } 4007 4008 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4009 SmallVector<Instruction *, 16> Worklist; 4010 PushDefUseChildren(PN, Worklist); 4011 4012 SmallPtrSet<Instruction *, 8> Visited; 4013 Visited.insert(PN); 4014 while (!Worklist.empty()) { 4015 Instruction *I = Worklist.pop_back_val(); 4016 if (!Visited.insert(I).second) 4017 continue; 4018 4019 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4020 if (It != ValueExprMap.end()) { 4021 const SCEV *Old = It->second; 4022 4023 // Short-circuit the def-use traversal if the symbolic name 4024 // ceases to appear in expressions. 4025 if (Old != SymName && !hasOperand(Old, SymName)) 4026 continue; 4027 4028 // SCEVUnknown for a PHI either means that it has an unrecognized 4029 // structure, it's a PHI that's in the progress of being computed 4030 // by createNodeForPHI, or it's a single-value PHI. In the first case, 4031 // additional loop trip count information isn't going to change anything. 4032 // In the second case, createNodeForPHI will perform the necessary 4033 // updates on its own when it gets to that point. In the third, we do 4034 // want to forget the SCEVUnknown. 
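      // The condition below erases in exactly those cases: I is not a PHI,
      // its SCEV is no longer a plain SCEVUnknown, or it is a single-value
      // PHI distinct from PN whose SCEV is the symbolic name itself.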
4035 if (!isa<PHINode>(I) || 4036 !isa<SCEVUnknown>(Old) || 4037 (I != PN && Old == SymName)) { 4038 eraseValueFromMap(It->first); 4039 forgetMemoizedResults(Old); 4040 } 4041 } 4042 4043 PushDefUseChildren(I, Worklist); 4044 } 4045 } 4046 4047 namespace { 4048 4049 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4050 public: 4051 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4052 ScalarEvolution &SE) { 4053 SCEVInitRewriter Rewriter(L, SE); 4054 const SCEV *Result = Rewriter.visit(S); 4055 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4056 } 4057 4058 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4059 if (!SE.isLoopInvariant(Expr, L)) 4060 Valid = false; 4061 return Expr; 4062 } 4063 4064 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4065 // Only allow AddRecExprs for this loop. 4066 if (Expr->getLoop() == L) 4067 return Expr->getStart(); 4068 Valid = false; 4069 return Expr; 4070 } 4071 4072 bool isValid() { return Valid; } 4073 4074 private: 4075 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4076 : SCEVRewriteVisitor(SE), L(L) {} 4077 4078 const Loop *L; 4079 bool Valid = true; 4080 }; 4081 4082 /// This class evaluates the compare condition by matching it against the 4083 /// condition of loop latch. If there is a match we assume a true value 4084 /// for the condition while building SCEV nodes. 4085 class SCEVBackedgeConditionFolder 4086 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4087 public: 4088 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4089 ScalarEvolution &SE) { 4090 bool IsPosBECond = false; 4091 Value *BECond = nullptr; 4092 if (BasicBlock *Latch = L->getLoopLatch()) { 4093 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4094 if (BI && BI->isConditional()) { 4095 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4096 "Both outgoing branches should not target same header!"); 4097 BECond = BI->getCondition(); 4098 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4099 } else { 4100 return S; 4101 } 4102 } 4103 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4104 return Rewriter.visit(S); 4105 } 4106 4107 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4108 const SCEV *Result = Expr; 4109 bool InvariantF = SE.isLoopInvariant(Expr, L); 4110 4111 if (!InvariantF) { 4112 Instruction *I = cast<Instruction>(Expr->getValue()); 4113 switch (I->getOpcode()) { 4114 case Instruction::Select: { 4115 SelectInst *SI = cast<SelectInst>(I); 4116 Optional<const SCEV *> Res = 4117 compareWithBackedgeCondition(SI->getCondition()); 4118 if (Res.hasValue()) { 4119 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4120 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4121 } 4122 break; 4123 } 4124 default: { 4125 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4126 if (Res.hasValue()) 4127 Result = Res.getValue(); 4128 break; 4129 } 4130 } 4131 } 4132 return Result; 4133 } 4134 4135 private: 4136 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4137 bool IsPosBECond, ScalarEvolution &SE) 4138 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4139 IsPositiveBECond(IsPosBECond) {} 4140 4141 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4142 4143 const Loop *L; 4144 /// Loop back condition. 4145 Value *BackedgeCond = nullptr; 4146 /// Set to true if loop back is on positive branch condition. 
4147 bool IsPositiveBECond; 4148 }; 4149 4150 Optional<const SCEV *> 4151 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4152 4153 // If value matches the backedge condition for loop latch, 4154 // then return a constant evolution node based on loopback 4155 // branch taken. 4156 if (BackedgeCond == IC) 4157 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4158 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4159 return None; 4160 } 4161 4162 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4163 public: 4164 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4165 ScalarEvolution &SE) { 4166 SCEVShiftRewriter Rewriter(L, SE); 4167 const SCEV *Result = Rewriter.visit(S); 4168 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4169 } 4170 4171 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4172 // Only allow AddRecExprs for this loop. 4173 if (!SE.isLoopInvariant(Expr, L)) 4174 Valid = false; 4175 return Expr; 4176 } 4177 4178 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4179 if (Expr->getLoop() == L && Expr->isAffine()) 4180 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4181 Valid = false; 4182 return Expr; 4183 } 4184 4185 bool isValid() { return Valid; } 4186 4187 private: 4188 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4189 : SCEVRewriteVisitor(SE), L(L) {} 4190 4191 const Loop *L; 4192 bool Valid = true; 4193 }; 4194 4195 } // end anonymous namespace 4196 4197 SCEV::NoWrapFlags 4198 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4199 if (!AR->isAffine()) 4200 return SCEV::FlagAnyWrap; 4201 4202 using OBO = OverflowingBinaryOperator; 4203 4204 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4205 4206 if (!AR->hasNoSignedWrap()) { 4207 ConstantRange AddRecRange = getSignedRange(AR); 4208 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4209 4210 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4211 Instruction::Add, IncRange, OBO::NoSignedWrap); 4212 if (NSWRegion.contains(AddRecRange)) 4213 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4214 } 4215 4216 if (!AR->hasNoUnsignedWrap()) { 4217 ConstantRange AddRecRange = getUnsignedRange(AR); 4218 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4219 4220 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4221 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4222 if (NUWRegion.contains(AddRecRange)) 4223 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4224 } 4225 4226 return Result; 4227 } 4228 4229 namespace { 4230 4231 /// Represents an abstract binary operation. This may exist as a 4232 /// normal instruction or constant expression, or may have been 4233 /// derived from an expression tree. 4234 struct BinaryOp { 4235 unsigned Opcode; 4236 Value *LHS; 4237 Value *RHS; 4238 bool IsNSW = false; 4239 bool IsNUW = false; 4240 4241 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4242 /// constant expression. 
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
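        // For example, given
        //   %agg = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
        //   %sum = extractvalue { i32, i1 } %agg, 0
        // where every use of %sum is guarded by a check of the overflow bit,
        // we return BinaryOp(Add, %a, %b) with IsNSW set.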
4329 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 4330 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4331 CI->getArgOperand(1), /* IsNSW = */ true, 4332 /* IsNUW = */ false); 4333 else 4334 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4335 CI->getArgOperand(1), /* IsNSW = */ false, 4336 /* IsNUW*/ true); 4337 case Intrinsic::ssub_with_overflow: 4338 case Intrinsic::usub_with_overflow: 4339 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT)) 4340 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4341 CI->getArgOperand(1)); 4342 4343 // The same reasoning as sadd/uadd above. 4344 if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow) 4345 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4346 CI->getArgOperand(1), /* IsNSW = */ true, 4347 /* IsNUW = */ false); 4348 else 4349 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4350 CI->getArgOperand(1), /* IsNSW = */ false, 4351 /* IsNUW = */ true); 4352 case Intrinsic::smul_with_overflow: 4353 case Intrinsic::umul_with_overflow: 4354 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4355 CI->getArgOperand(1)); 4356 default: 4357 break; 4358 } 4359 } 4360 4361 default: 4362 break; 4363 } 4364 4365 return None; 4366 } 4367 4368 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4369 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4370 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4371 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4372 /// follows one of the following patterns: 4373 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4374 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4375 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4376 /// we return the type of the truncation operation, and indicate whether the 4377 /// truncated type should be treated as signed/unsigned by setting 4378 /// \p Signed to true/false, respectively. 4379 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4380 bool &Signed, ScalarEvolution &SE) { 4381 // The case where Op == SymbolicPHI (that is, with no type conversions on 4382 // the way) is handled by the regular add recurrence creating logic and 4383 // would have already been triggered in createAddRecForPHI. Reaching it here 4384 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4385 // because one of the other operands of the SCEVAddExpr updating this PHI is 4386 // not invariant). 4387 // 4388 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4389 // this case predicates that allow us to prove that Op == SymbolicPHI will 4390 // be added. 4391 if (Op == SymbolicPHI) 4392 return nullptr; 4393 4394 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4395 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4396 if (SourceBits != NewBits) 4397 return nullptr; 4398 4399 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4400 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4401 if (!SExt && !ZExt) 4402 return nullptr; 4403 const SCEVTruncateExpr *Trunc = 4404 SExt ? 
dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4405 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4406 if (!Trunc) 4407 return nullptr; 4408 const SCEV *X = Trunc->getOperand(); 4409 if (X != SymbolicPHI) 4410 return nullptr; 4411 Signed = SExt != nullptr; 4412 return Trunc->getType(); 4413 } 4414 4415 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4416 if (!PN->getType()->isIntegerTy()) 4417 return nullptr; 4418 const Loop *L = LI.getLoopFor(PN->getParent()); 4419 if (!L || L->getHeader() != PN->getParent()) 4420 return nullptr; 4421 return L; 4422 } 4423 4424 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4425 // computation that updates the phi follows the following pattern: 4426 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4427 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4428 // If so, try to see if it can be rewritten as an AddRecExpr under some 4429 // Predicates. If successful, return them as a pair. Also cache the results 4430 // of the analysis. 4431 // 4432 // Example usage scenario: 4433 // Say the Rewriter is called for the following SCEV: 4434 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4435 // where: 4436 // %X = phi i64 (%Start, %BEValue) 4437 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4438 // and call this function with %SymbolicPHI = %X. 4439 // 4440 // The analysis will find that the value coming around the backedge has 4441 // the following SCEV: 4442 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4443 // Upon concluding that this matches the desired pattern, the function 4444 // will return the pair {NewAddRec, SmallPredsVec} where: 4445 // NewAddRec = {%Start,+,%Step} 4446 // SmallPredsVec = {P1, P2, P3} as follows: 4447 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4448 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4449 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4450 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4451 // under the predicates {P1,P2,P3}. 4452 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4453 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4454 // 4455 // TODO's: 4456 // 4457 // 1) Extend the Induction descriptor to also support inductions that involve 4458 // casts: When needed (namely, when we are called in the context of the 4459 // vectorizer induction analysis), a Set of cast instructions will be 4460 // populated by this method, and provided back to isInductionPHI. This is 4461 // needed to allow the vectorizer to properly record them to be ignored by 4462 // the cost model and to avoid vectorizing them (otherwise these casts, 4463 // which are redundant under the runtime overflow checks, will be 4464 // vectorized, which can be costly). 4465 // 4466 // 2) Support additional induction/PHISCEV patterns: We also want to support 4467 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4468 // after the induction update operation (the induction increment): 4469 // 4470 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4471 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4472 // 4473 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4474 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 
4475 // 4476 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4477 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4478 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4479 SmallVector<const SCEVPredicate *, 3> Predicates; 4480 4481 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4482 // return an AddRec expression under some predicate. 4483 4484 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4485 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4486 assert(L && "Expecting an integer loop header phi"); 4487 4488 // The loop may have multiple entrances or multiple exits; we can analyze 4489 // this phi as an addrec if it has a unique entry value and a unique 4490 // backedge value. 4491 Value *BEValueV = nullptr, *StartValueV = nullptr; 4492 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4493 Value *V = PN->getIncomingValue(i); 4494 if (L->contains(PN->getIncomingBlock(i))) { 4495 if (!BEValueV) { 4496 BEValueV = V; 4497 } else if (BEValueV != V) { 4498 BEValueV = nullptr; 4499 break; 4500 } 4501 } else if (!StartValueV) { 4502 StartValueV = V; 4503 } else if (StartValueV != V) { 4504 StartValueV = nullptr; 4505 break; 4506 } 4507 } 4508 if (!BEValueV || !StartValueV) 4509 return None; 4510 4511 const SCEV *BEValue = getSCEV(BEValueV); 4512 4513 // If the value coming around the backedge is an add with the symbolic 4514 // value we just inserted, possibly with casts that we can ignore under 4515 // an appropriate runtime guard, then we found a simple induction variable! 4516 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4517 if (!Add) 4518 return None; 4519 4520 // If there is a single occurrence of the symbolic value, possibly 4521 // casted, replace it with a recurrence. 4522 unsigned FoundIndex = Add->getNumOperands(); 4523 Type *TruncTy = nullptr; 4524 bool Signed; 4525 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4526 if ((TruncTy = 4527 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4528 if (FoundIndex == e) { 4529 FoundIndex = i; 4530 break; 4531 } 4532 4533 if (FoundIndex == Add->getNumOperands()) 4534 return None; 4535 4536 // Create an add with everything but the specified operand. 4537 SmallVector<const SCEV *, 8> Ops; 4538 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4539 if (i != FoundIndex) 4540 Ops.push_back(Add->getOperand(i)); 4541 const SCEV *Accum = getAddExpr(Ops); 4542 4543 // The runtime checks will not be valid if the step amount is 4544 // varying inside the loop. 4545 if (!isLoopInvariant(Accum, L)) 4546 return None; 4547 4548 // *** Part2: Create the predicates 4549 4550 // Analysis was successful: we have a phi-with-cast pattern for which we 4551 // can return an AddRec expression under the following predicates: 4552 // 4553 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4554 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4555 // P2: An Equal predicate that guarantees that 4556 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4557 // P3: An Equal predicate that guarantees that 4558 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4559 // 4560 // As we next prove, the above predicates guarantee that: 4561 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4562 // 4563 // 4564 // More formally, we want to prove that: 4565 // Expr(i+1) = Start + (i+1) * Accum 4566 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4567 // 4568 // Given that: 4569 // 1) Expr(0) = Start 4570 // 2) Expr(1) = Start + Accum 4571 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4572 // 3) Induction hypothesis (step i): 4573 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4574 // 4575 // Proof: 4576 // Expr(i+1) = 4577 // = Start + (i+1)*Accum 4578 // = (Start + i*Accum) + Accum 4579 // = Expr(i) + Accum 4580 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4581 // :: from step i 4582 // 4583 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4584 // 4585 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4586 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4587 // + Accum :: from P3 4588 // 4589 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4590 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4591 // 4592 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4593 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4594 // 4595 // By induction, the same applies to all iterations 1<=i<n: 4596 // 4597 4598 // Create a truncated addrec for which we will add a no overflow check (P1). 4599 const SCEV *StartVal = getSCEV(StartValueV); 4600 const SCEV *PHISCEV = 4601 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4602 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4603 4604 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4605 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4606 // will be constant. 4607 // 4608 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4609 // add P1. 4610 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4611 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4612 Signed ? SCEVWrapPredicate::IncrementNSSW 4613 : SCEVWrapPredicate::IncrementNUSW; 4614 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4615 Predicates.push_back(AddRecPred); 4616 } 4617 4618 // Create the Equal Predicates P2,P3: 4619 4620 // It is possible that the predicates P2 and/or P3 are computable at 4621 // compile time due to StartVal and/or Accum being constants. 4622 // If either one is, then we can check that now and escape if either P2 4623 // or P3 is false. 4624 4625 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4626 // for each of StartVal and Accum 4627 auto GetExtendedExpr = [&](const SCEV *Expr) -> const SCEV * { 4628 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4629 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4630 const SCEV *ExtendedExpr = 4631 Signed ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4632 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4633 return ExtendedExpr;
4634 };
4635
4636 // Given:
4637 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4638 // = GetExtendedExpr(Expr)
4639 // Determine whether the predicate P: Expr == ExtendedExpr
4640 // is known to be false at compile time.
4641 auto PredIsKnownFalse = [&](const SCEV *Expr,
4642 const SCEV *ExtendedExpr) -> bool {
4643 return Expr != ExtendedExpr &&
4644 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4645 };
4646
4647 const SCEV *StartExtended = GetExtendedExpr(StartVal);
4648 if (PredIsKnownFalse(StartVal, StartExtended)) {
4649 DEBUG(dbgs() << "P2 is compile-time false\n");
4650 return None;
4651 }
4652
4653 const SCEV *AccumExtended = GetExtendedExpr(Accum);
4654 if (PredIsKnownFalse(Accum, AccumExtended)) {
4655 DEBUG(dbgs() << "P3 is compile-time false\n");
4656 return None;
4657 }
4658
4659 auto AppendPredicate = [&](const SCEV *Expr,
4660 const SCEV *ExtendedExpr) -> void {
4661 if (Expr != ExtendedExpr &&
4662 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4663 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4664 DEBUG(dbgs() << "Added Predicate: " << *Pred);
4665 Predicates.push_back(Pred);
4666 }
4667 };
4668
4669 AppendPredicate(StartVal, StartExtended);
4670 AppendPredicate(Accum, AccumExtended);
4671
4672 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4673 // which the casts had been folded away. The caller can rewrite SymbolicPHI
4674 // into NewAR if it will also add the runtime overflow checks specified in
4675 // Predicates.
4676 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4677
4678 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4679 std::make_pair(NewAR, Predicates);
4680 // Remember the result of the analysis for this SCEV at this location.
4681 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4682 return PredRewrite;
4683 }
4684
4685 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4686 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4687 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4688 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4689 if (!L)
4690 return None;
4691
4692 // Check to see if we already analyzed this PHI.
4693 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4694 if (I != PredicatedSCEVRewrites.end()) {
4695 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4696 I->second;
4697 // Analysis was done before and failed to create an AddRec:
4698 if (Rewrite.first == SymbolicPHI)
4699 return None;
4700 // Analysis was done before and succeeded in creating an AddRec under
4701 // a predicate:
4702 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4703 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4704 return Rewrite;
4705 }
4706
4707 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4708 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4709
4710 // Record in the cache that the analysis failed.
4711 if (!Rewrite) {
4712 SmallVector<const SCEVPredicate *, 3> Predicates;
4713 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4714 return None;
4715 }
4716
4717 return Rewrite;
4718 }
4719
4720 /// A helper function for createAddRecFromPHI to handle simple cases.
4721 ///
4722 /// This function tries to find an AddRec expression for the simplest (yet most
4723 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4724 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4725 /// technique for finding the AddRec expression.
4726 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4727 Value *BEValueV,
4728 Value *StartValueV) {
4729 const Loop *L = LI.getLoopFor(PN->getParent());
4730 assert(L && L->getHeader() == PN->getParent());
4731 assert(BEValueV && StartValueV);
4732
4733 auto BO = MatchBinaryOp(BEValueV, DT);
4734 if (!BO)
4735 return nullptr;
4736
4737 if (BO->Opcode != Instruction::Add)
4738 return nullptr;
4739
4740 const SCEV *Accum = nullptr;
4741 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4742 Accum = getSCEV(BO->RHS);
4743 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4744 Accum = getSCEV(BO->LHS);
4745
4746 if (!Accum)
4747 return nullptr;
4748
4749 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4750 if (BO->IsNUW)
4751 Flags = setFlags(Flags, SCEV::FlagNUW);
4752 if (BO->IsNSW)
4753 Flags = setFlags(Flags, SCEV::FlagNSW);
4754
4755 const SCEV *StartVal = getSCEV(StartValueV);
4756 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4757
4758 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4759
4760 // We can add Flags to the post-inc expression only if we
4761 // know that it is *undefined behavior* for BEValueV to
4762 // overflow.
4763 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4764 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4765 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4766
4767 return PHISCEV;
4768 }
4769
4770 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4771 const Loop *L = LI.getLoopFor(PN->getParent());
4772 if (!L || L->getHeader() != PN->getParent())
4773 return nullptr;
4774
4775 // The loop may have multiple entrances or multiple exits; we can analyze
4776 // this phi as an addrec if it has a unique entry value and a unique
4777 // backedge value.
4778 Value *BEValueV = nullptr, *StartValueV = nullptr;
4779 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4780 Value *V = PN->getIncomingValue(i);
4781 if (L->contains(PN->getIncomingBlock(i))) {
4782 if (!BEValueV) {
4783 BEValueV = V;
4784 } else if (BEValueV != V) {
4785 BEValueV = nullptr;
4786 break;
4787 }
4788 } else if (!StartValueV) {
4789 StartValueV = V;
4790 } else if (StartValueV != V) {
4791 StartValueV = nullptr;
4792 break;
4793 }
4794 }
4795 if (!BEValueV || !StartValueV)
4796 return nullptr;
4797
4798 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
4799 "PHI node already processed?");
4800
4801 // First, try to find an AddRec expression without creating a fictitious
4802 // symbolic value for PN.
4803 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
4804 return S;
4805
4806 // Handle PHI node value symbolically.
4807 const SCEV *SymbolicName = getUnknown(PN);
4808 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
4809
4810 // Using this symbolic name for the PHI, analyze the value coming around
4811 // the back-edge.
4812 const SCEV *BEValue = getSCEV(BEValueV);
4813
4814 // NOTE: If BEValue is loop invariant, we know that the PHI node just
4815 // has a special value for the first iteration of the loop.
4816
4817 // If the value coming around the backedge is an add with the symbolic
4818 // value we just inserted, then we found a simple induction variable!
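// For instance (an illustrative sketch; the value names are invented):
//   %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.next = sub i64 %iv, 8
// is not matched by createSimpleAffineAddRec (the update is a sub), but its
// backedge value has SCEV (-8 + %iv), an add containing the symbolic PHI
// exactly once, so the code below rewrites the PHI to {%start,+,-8}.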
4819 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 4820 // If there is a single occurrence of the symbolic value, replace it 4821 // with a recurrence. 4822 unsigned FoundIndex = Add->getNumOperands(); 4823 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4824 if (Add->getOperand(i) == SymbolicName) 4825 if (FoundIndex == e) { 4826 FoundIndex = i; 4827 break; 4828 } 4829 4830 if (FoundIndex != Add->getNumOperands()) { 4831 // Create an add with everything but the specified operand. 4832 SmallVector<const SCEV *, 8> Ops; 4833 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4834 if (i != FoundIndex) 4835 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 4836 L, *this)); 4837 const SCEV *Accum = getAddExpr(Ops); 4838 4839 // This is not a valid addrec if the step amount is varying each 4840 // loop iteration, but is not itself an addrec in this loop. 4841 if (isLoopInvariant(Accum, L) || 4842 (isa<SCEVAddRecExpr>(Accum) && 4843 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 4844 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4845 4846 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 4847 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 4848 if (BO->IsNUW) 4849 Flags = setFlags(Flags, SCEV::FlagNUW); 4850 if (BO->IsNSW) 4851 Flags = setFlags(Flags, SCEV::FlagNSW); 4852 } 4853 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 4854 // If the increment is an inbounds GEP, then we know the address 4855 // space cannot be wrapped around. We cannot make any guarantee 4856 // about signed or unsigned overflow because pointers are 4857 // unsigned but we may have a negative index from the base 4858 // pointer. We can guarantee that no unsigned wrap occurs if the 4859 // indices form a positive value. 4860 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 4861 Flags = setFlags(Flags, SCEV::FlagNW); 4862 4863 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 4864 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 4865 Flags = setFlags(Flags, SCEV::FlagNUW); 4866 } 4867 4868 // We cannot transfer nuw and nsw flags from subtraction 4869 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 4870 // for instance. 4871 } 4872 4873 const SCEV *StartVal = getSCEV(StartValueV); 4874 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4875 4876 // Okay, for the entire analysis of this edge we assumed the PHI 4877 // to be symbolic. We now need to go back and purge all of the 4878 // entries for the scalars that use the symbolic expression. 4879 forgetSymbolicName(PN, SymbolicName); 4880 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4881 4882 // We can add Flags to the post-inc expression only if we 4883 // know that it is *undefined behavior* for BEValueV to 4884 // overflow. 4885 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4886 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4887 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4888 4889 return PHISCEV; 4890 } 4891 } 4892 } else { 4893 // Otherwise, this could be a loop like this: 4894 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 4895 // In this case, j = {1,+,1} and BEValue is j. 4896 // Because the other in-value of i (0) fits the evolution of BEValue 4897 // i really is an addrec evolution. 
4898 //
4899 // We can generalize this by saying that i is the shifted value of BEValue
4900 // by one iteration:
4901 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
4902 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
4903 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
4904 if (Shifted != getCouldNotCompute() &&
4905 Start != getCouldNotCompute()) {
4906 const SCEV *StartVal = getSCEV(StartValueV);
4907 if (Start == StartVal) {
4908 // Okay, for the entire analysis of this edge we assumed the PHI
4909 // to be symbolic. We now need to go back and purge all of the
4910 // entries for the scalars that use the symbolic expression.
4911 forgetSymbolicName(PN, SymbolicName);
4912 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
4913 return Shifted;
4914 }
4915 }
4916 }
4917
4918 // Remove the temporary PHI node SCEV that has been inserted while intending
4919 // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
4920 // as it would prevent later (possibly simpler) SCEV expressions from being
4921 // added to the ValueExprMap.
4922 eraseValueFromMap(PN);
4923
4924 return nullptr;
4925 }
4926
4927 // Checks if the SCEV S is available at BB. S is considered available at BB
4928 // if S can be materialized at BB without introducing a fault.
4929 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
4930 BasicBlock *BB) {
4931 struct CheckAvailable {
4932 bool TraversalDone = false;
4933 bool Available = true;
4934
4935 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
4936 BasicBlock *BB = nullptr;
4937 DominatorTree &DT;
4938
4939 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
4940 : L(L), BB(BB), DT(DT) {}
4941
4942 bool setUnavailable() {
4943 TraversalDone = true;
4944 Available = false;
4945 return false;
4946 }
4947
4948 bool follow(const SCEV *S) {
4949 switch (S->getSCEVType()) {
4950 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
4951 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
4952 // These expressions are available if their operand(s) is/are.
4953 return true;
4954
4955 case scAddRecExpr: {
4956 // We allow add recurrences that are in the loop BB is in, or in some
4957 // outer loop. This guarantees availability because the value of the
4958 // add recurrence at BB is simply the "current" value of the induction
4959 // variable. We can relax this in the future; for instance an add
4960 // recurrence on a sibling dominating loop is also available at BB.
4961 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
4962 if (L && (ARLoop == L || ARLoop->contains(L)))
4963 return true;
4964
4965 return setUnavailable();
4966 }
4967
4968 case scUnknown: {
4969 // For SCEVUnknown, we check for simple dominance.
4970 const auto *SU = cast<SCEVUnknown>(S);
4971 Value *V = SU->getValue();
4972
4973 if (isa<Argument>(V))
4974 return false;
4975
4976 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
4977 return false;
4978
4979 return setUnavailable();
4980 }
4981
4982 case scUDivExpr:
4983 case scCouldNotCompute:
4984 // We do not try to be smart about these at all.
4985 return setUnavailable(); 4986 } 4987 llvm_unreachable("switch should be fully covered!"); 4988 } 4989 4990 bool isDone() { return TraversalDone; } 4991 }; 4992 4993 CheckAvailable CA(L, BB, DT); 4994 SCEVTraversal<CheckAvailable> ST(CA); 4995 4996 ST.visitAll(S); 4997 return CA.Available; 4998 } 4999 5000 // Try to match a control flow sequence that branches out at BI and merges back 5001 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5002 // match. 5003 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5004 Value *&C, Value *&LHS, Value *&RHS) { 5005 C = BI->getCondition(); 5006 5007 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5008 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5009 5010 if (!LeftEdge.isSingleEdge()) 5011 return false; 5012 5013 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5014 5015 Use &LeftUse = Merge->getOperandUse(0); 5016 Use &RightUse = Merge->getOperandUse(1); 5017 5018 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5019 LHS = LeftUse; 5020 RHS = RightUse; 5021 return true; 5022 } 5023 5024 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5025 LHS = RightUse; 5026 RHS = LeftUse; 5027 return true; 5028 } 5029 5030 return false; 5031 } 5032 5033 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5034 auto IsReachable = 5035 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5036 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5037 const Loop *L = LI.getLoopFor(PN->getParent()); 5038 5039 // We don't want to break LCSSA, even in a SCEV expression tree. 5040 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5041 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5042 return nullptr; 5043 5044 // Try to match 5045 // 5046 // br %cond, label %left, label %right 5047 // left: 5048 // br label %merge 5049 // right: 5050 // br label %merge 5051 // merge: 5052 // V = phi [ %x, %left ], [ %y, %right ] 5053 // 5054 // as "select %cond, %x, %y" 5055 5056 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5057 assert(IDom && "At least the entry block should dominate PN"); 5058 5059 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5060 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5061 5062 if (BI && BI->isConditional() && 5063 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5064 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5065 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5066 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5067 } 5068 5069 return nullptr; 5070 } 5071 5072 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5073 if (const SCEV *S = createAddRecFromPHI(PN)) 5074 return S; 5075 5076 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5077 return S; 5078 5079 // If the PHI has a single incoming value, follow that value, unless the 5080 // PHI's incoming blocks are in a different loop, in which case doing so 5081 // risks breaking LCSSA form. Instcombine would normally zap these, but 5082 // it doesn't have DominatorTree information, so it may miss cases. 5083 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5084 if (LI.replacementPreservesLCSSAForm(PN, V)) 5085 return getSCEV(V); 5086 5087 // If it's not a loop phi, we can't handle it yet. 
5088 return getUnknown(PN); 5089 } 5090 5091 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5092 Value *Cond, 5093 Value *TrueVal, 5094 Value *FalseVal) { 5095 // Handle "constant" branch or select. This can occur for instance when a 5096 // loop pass transforms an inner loop and moves on to process the outer loop. 5097 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5098 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5099 5100 // Try to match some simple smax or umax patterns. 5101 auto *ICI = dyn_cast<ICmpInst>(Cond); 5102 if (!ICI) 5103 return getUnknown(I); 5104 5105 Value *LHS = ICI->getOperand(0); 5106 Value *RHS = ICI->getOperand(1); 5107 5108 switch (ICI->getPredicate()) { 5109 case ICmpInst::ICMP_SLT: 5110 case ICmpInst::ICMP_SLE: 5111 std::swap(LHS, RHS); 5112 LLVM_FALLTHROUGH; 5113 case ICmpInst::ICMP_SGT: 5114 case ICmpInst::ICMP_SGE: 5115 // a >s b ? a+x : b+x -> smax(a, b)+x 5116 // a >s b ? b+x : a+x -> smin(a, b)+x 5117 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5118 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5119 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5120 const SCEV *LA = getSCEV(TrueVal); 5121 const SCEV *RA = getSCEV(FalseVal); 5122 const SCEV *LDiff = getMinusSCEV(LA, LS); 5123 const SCEV *RDiff = getMinusSCEV(RA, RS); 5124 if (LDiff == RDiff) 5125 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5126 LDiff = getMinusSCEV(LA, RS); 5127 RDiff = getMinusSCEV(RA, LS); 5128 if (LDiff == RDiff) 5129 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5130 } 5131 break; 5132 case ICmpInst::ICMP_ULT: 5133 case ICmpInst::ICMP_ULE: 5134 std::swap(LHS, RHS); 5135 LLVM_FALLTHROUGH; 5136 case ICmpInst::ICMP_UGT: 5137 case ICmpInst::ICMP_UGE: 5138 // a >u b ? a+x : b+x -> umax(a, b)+x 5139 // a >u b ? b+x : a+x -> umin(a, b)+x 5140 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5141 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5142 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5143 const SCEV *LA = getSCEV(TrueVal); 5144 const SCEV *RA = getSCEV(FalseVal); 5145 const SCEV *LDiff = getMinusSCEV(LA, LS); 5146 const SCEV *RDiff = getMinusSCEV(RA, RS); 5147 if (LDiff == RDiff) 5148 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5149 LDiff = getMinusSCEV(LA, RS); 5150 RDiff = getMinusSCEV(RA, LS); 5151 if (LDiff == RDiff) 5152 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5153 } 5154 break; 5155 case ICmpInst::ICMP_NE: 5156 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5157 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5158 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5159 const SCEV *One = getOne(I->getType()); 5160 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5161 const SCEV *LA = getSCEV(TrueVal); 5162 const SCEV *RA = getSCEV(FalseVal); 5163 const SCEV *LDiff = getMinusSCEV(LA, LS); 5164 const SCEV *RDiff = getMinusSCEV(RA, One); 5165 if (LDiff == RDiff) 5166 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5167 } 5168 break; 5169 case ICmpInst::ICMP_EQ: 5170 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 5171 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5172 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5173 const SCEV *One = getOne(I->getType()); 5174 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5175 const SCEV *LA = getSCEV(TrueVal); 5176 const SCEV *RA = getSCEV(FalseVal); 5177 const SCEV *LDiff = getMinusSCEV(LA, One); 5178 const SCEV *RDiff = getMinusSCEV(RA, LS); 5179 if (LDiff == RDiff) 5180 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5181 } 5182 break; 5183 default: 5184 break; 5185 } 5186 5187 return getUnknown(I); 5188 } 5189 5190 /// Expand GEP instructions into add and multiply operations. This allows them 5191 /// to be analyzed by regular SCEV code. 5192 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5193 // Don't attempt to analyze GEPs over unsized objects. 5194 if (!GEP->getSourceElementType()->isSized()) 5195 return getUnknown(GEP); 5196 5197 SmallVector<const SCEV *, 4> IndexExprs; 5198 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5199 IndexExprs.push_back(getSCEV(*Index)); 5200 return getGEPExpr(GEP, IndexExprs); 5201 } 5202 5203 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5204 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5205 return C->getAPInt().countTrailingZeros(); 5206 5207 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5208 return std::min(GetMinTrailingZeros(T->getOperand()), 5209 (uint32_t)getTypeSizeInBits(T->getType())); 5210 5211 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5212 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5213 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5214 ? getTypeSizeInBits(E->getType()) 5215 : OpRes; 5216 } 5217 5218 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5219 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5220 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5221 ? getTypeSizeInBits(E->getType()) 5222 : OpRes; 5223 } 5224 5225 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5226 // The result is the min of all operands results. 5227 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5228 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5229 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5230 return MinOpRes; 5231 } 5232 5233 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5234 // The result is the sum of all operands results. 5235 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5236 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5237 for (unsigned i = 1, e = M->getNumOperands(); 5238 SumOpRes != BitWidth && i != e; ++i) 5239 SumOpRes = 5240 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5241 return SumOpRes; 5242 } 5243 5244 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5245 // The result is the min of all operands results. 5246 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5247 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5248 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5249 return MinOpRes; 5250 } 5251 5252 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5253 // The result is the min of all operands results. 
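// For instance (illustrative): smax(%a * 4, %b * 8) always equals one of
// its operands, so it is guaranteed at least min(2, 3) == 2 trailing zero
// bits, whichever operand the max selects.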
5254 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5255 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5256 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5257 return MinOpRes; 5258 } 5259 5260 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5261 // The result is the min of all operands results. 5262 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5263 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5264 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5265 return MinOpRes; 5266 } 5267 5268 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5269 // For a SCEVUnknown, ask ValueTracking. 5270 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5271 return Known.countMinTrailingZeros(); 5272 } 5273 5274 // SCEVUDivExpr 5275 return 0; 5276 } 5277 5278 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5279 auto I = MinTrailingZerosCache.find(S); 5280 if (I != MinTrailingZerosCache.end()) 5281 return I->second; 5282 5283 uint32_t Result = GetMinTrailingZerosImpl(S); 5284 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5285 assert(InsertPair.second && "Should insert a new key"); 5286 return InsertPair.first->second; 5287 } 5288 5289 /// Helper method to assign a range to V from metadata present in the IR. 5290 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5291 if (Instruction *I = dyn_cast<Instruction>(V)) 5292 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5293 return getConstantRangeFromMetadata(*MD); 5294 5295 return None; 5296 } 5297 5298 /// Determine the range for a particular SCEV. If SignHint is 5299 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5300 /// with a "cleaner" unsigned (resp. signed) representation. 5301 const ConstantRange & 5302 ScalarEvolution::getRangeRef(const SCEV *S, 5303 ScalarEvolution::RangeSignHint SignHint) { 5304 DenseMap<const SCEV *, ConstantRange> &Cache = 5305 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5306 : SignedRanges; 5307 5308 // See if we've computed this range already. 5309 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5310 if (I != Cache.end()) 5311 return I->second; 5312 5313 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5314 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5315 5316 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5317 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5318 5319 // If the value has known zeros, the maximum value will have those known zeros 5320 // as well. 
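// For example (illustrative): if an i8 expression is known to be a
// multiple of 8 (TZ == 3), the unsigned case below yields [0, 0xF8 + 1),
// since 0xF8 is the largest i8 value with three trailing zero bits.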
5321 uint32_t TZ = GetMinTrailingZeros(S); 5322 if (TZ != 0) { 5323 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5324 ConservativeResult = 5325 ConstantRange(APInt::getMinValue(BitWidth), 5326 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5327 else 5328 ConservativeResult = ConstantRange( 5329 APInt::getSignedMinValue(BitWidth), 5330 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5331 } 5332 5333 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5334 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5335 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5336 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5337 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5338 } 5339 5340 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5341 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5342 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5343 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5344 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5345 } 5346 5347 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5348 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5349 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5350 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5351 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5352 } 5353 5354 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5355 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5356 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5357 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5358 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5359 } 5360 5361 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5362 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5363 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5364 return setRange(UDiv, SignHint, 5365 ConservativeResult.intersectWith(X.udiv(Y))); 5366 } 5367 5368 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5369 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5370 return setRange(ZExt, SignHint, 5371 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5372 } 5373 5374 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5375 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5376 return setRange(SExt, SignHint, 5377 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5378 } 5379 5380 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5381 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5382 return setRange(Trunc, SignHint, 5383 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5384 } 5385 5386 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5387 // If there's no unsigned wrap, the value will never be less than its 5388 // initial value. 5389 if (AddRec->hasNoUnsignedWrap()) 5390 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5391 if (!C->getValue()->isZero()) 5392 ConservativeResult = ConservativeResult.intersectWith( 5393 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5394 5395 // If there's no signed wrap, and all the operands have the same sign or 5396 // zero, the value won't ever change sign. 
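// For example (illustrative): {1,+,2}<nsw> has only non-negative operands,
// so its range is intersected below with the non-negative half
// [0, 2^(BitWidth-1)), proving the value never becomes negative.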
5397 if (AddRec->hasNoSignedWrap()) { 5398 bool AllNonNeg = true; 5399 bool AllNonPos = true; 5400 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5401 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5402 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5403 } 5404 if (AllNonNeg) 5405 ConservativeResult = ConservativeResult.intersectWith( 5406 ConstantRange(APInt(BitWidth, 0), 5407 APInt::getSignedMinValue(BitWidth))); 5408 else if (AllNonPos) 5409 ConservativeResult = ConservativeResult.intersectWith( 5410 ConstantRange(APInt::getSignedMinValue(BitWidth), 5411 APInt(BitWidth, 1))); 5412 } 5413 5414 // TODO: non-affine addrec 5415 if (AddRec->isAffine()) { 5416 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5417 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5418 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5419 auto RangeFromAffine = getRangeForAffineAR( 5420 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5421 BitWidth); 5422 if (!RangeFromAffine.isFullSet()) 5423 ConservativeResult = 5424 ConservativeResult.intersectWith(RangeFromAffine); 5425 5426 auto RangeFromFactoring = getRangeViaFactoring( 5427 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5428 BitWidth); 5429 if (!RangeFromFactoring.isFullSet()) 5430 ConservativeResult = 5431 ConservativeResult.intersectWith(RangeFromFactoring); 5432 } 5433 } 5434 5435 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5436 } 5437 5438 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5439 // Check if the IR explicitly contains !range metadata. 5440 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5441 if (MDRange.hasValue()) 5442 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5443 5444 // Split here to avoid paying the compile-time cost of calling both 5445 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5446 // if needed. 5447 const DataLayout &DL = getDataLayout(); 5448 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5449 // For a SCEVUnknown, ask ValueTracking. 5450 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5451 if (Known.One != ~Known.Zero + 1) 5452 ConservativeResult = 5453 ConservativeResult.intersectWith(ConstantRange(Known.One, 5454 ~Known.Zero + 1)); 5455 } else { 5456 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5457 "generalize as needed!"); 5458 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5459 if (NS > 1) 5460 ConservativeResult = ConservativeResult.intersectWith( 5461 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5462 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5463 } 5464 5465 return setRange(U, SignHint, std::move(ConservativeResult)); 5466 } 5467 5468 return setRange(S, SignHint, std::move(ConservativeResult)); 5469 } 5470 5471 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5472 // values that the expression can take. Initially, the expression has a value 5473 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5474 // argument defines if we treat Step as signed or unsigned. 
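// A worked example (illustrative): in i8, with StartRange == [0, 10),
// Step == 2 and MaxBECount == 3, the expression can change by at most
// Offset == 6, so the helper below returns [0, 9 + 6 + 1) == [0, 16).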
5475 static ConstantRange getRangeForAffineARHelper(APInt Step,
5476 const ConstantRange &StartRange,
5477 const APInt &MaxBECount,
5478 unsigned BitWidth, bool Signed) {
5479 // If either Step or MaxBECount is 0, then the expression won't change, and we
5480 // just need to return the initial range.
5481 if (Step == 0 || MaxBECount == 0)
5482 return StartRange;
5483
5484 // If we don't know anything about the initial value (i.e. StartRange is
5485 // FullRange), then we don't know anything about the final range either.
5486 // Return FullRange.
5487 if (StartRange.isFullSet())
5488 return ConstantRange(BitWidth, /* isFullSet = */ true);
5489
5490 // If Step is signed and negative, then we use its absolute value, but we also
5491 // note that we're moving in the opposite direction.
5492 bool Descending = Signed && Step.isNegative();
5493
5494 if (Signed)
5495 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5496 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5497 // These equations hold true due to the well-defined wrap-around behavior
5498 // of APInt.
5499 Step = Step.abs();
5500
5501 // Check if Offset would be more than the full span of BitWidth. If it is,
5502 // the expression is guaranteed to overflow.
5503 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5504 return ConstantRange(BitWidth, /* isFullSet = */ true);
5505
5506 // Offset is by how much the expression can change. Checks above guarantee no
5507 // overflow here.
5508 APInt Offset = Step * MaxBECount;
5509
5510 // Minimum value of the final range will match the minimal value of StartRange
5511 // if the expression is increasing and will be decreased by Offset otherwise.
5512 // Maximum value of the final range will match the maximal value of StartRange
5513 // if the expression is decreasing and will be increased by Offset otherwise.
5514 APInt StartLower = StartRange.getLower();
5515 APInt StartUpper = StartRange.getUpper() - 1;
5516 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5517 : (StartUpper + std::move(Offset));
5518
5519 // It's possible that the new minimum/maximum value will fall into the initial
5520 // range (due to wrap around). This means that the expression can take any
5521 // value in this bitwidth, and we have to return full range.
5522 if (StartRange.contains(MovedBoundary))
5523 return ConstantRange(BitWidth, /* isFullSet = */ true);
5524
5525 APInt NewLower =
5526 Descending ? std::move(MovedBoundary) : std::move(StartLower);
5527 APInt NewUpper =
5528 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5529 NewUpper += 1;
5530
5531 // If we end up with full range, return a proper full range.
5532 if (NewLower == NewUpper)
5533 return ConstantRange(BitWidth, /* isFullSet = */ true);
5534
5535 // No overflow detected, return the computed [NewLower, NewUpper) range.
5536 return ConstantRange(std::move(NewLower), std::move(NewUpper));
5537 }
5538
5539 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
5540 const SCEV *Step,
5541 const SCEV *MaxBECount,
5542 unsigned BitWidth) {
5543 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
5544 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
5545 "Precondition!");
5546
5547 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
5548 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5549
5550 // First, consider step signed.
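// E.g. (illustrative): if the signed range of Step is [-1, 2), the helper
// runs once with the most negative step (-1) and once with the most
// positive step (1), and the two candidate ranges are unioned below.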
5551 ConstantRange StartSRange = getSignedRange(Start); 5552 ConstantRange StepSRange = getSignedRange(Step); 5553 5554 // If Step can be both positive and negative, we need to find ranges for the 5555 // maximum absolute step values in both directions and union them. 5556 ConstantRange SR = 5557 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5558 MaxBECountValue, BitWidth, /* Signed = */ true); 5559 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5560 StartSRange, MaxBECountValue, 5561 BitWidth, /* Signed = */ true)); 5562 5563 // Next, consider step unsigned. 5564 ConstantRange UR = getRangeForAffineARHelper( 5565 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5566 MaxBECountValue, BitWidth, /* Signed = */ false); 5567 5568 // Finally, intersect signed and unsigned ranges. 5569 return SR.intersectWith(UR); 5570 } 5571 5572 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5573 const SCEV *Step, 5574 const SCEV *MaxBECount, 5575 unsigned BitWidth) { 5576 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5577 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5578 5579 struct SelectPattern { 5580 Value *Condition = nullptr; 5581 APInt TrueValue; 5582 APInt FalseValue; 5583 5584 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5585 const SCEV *S) { 5586 Optional<unsigned> CastOp; 5587 APInt Offset(BitWidth, 0); 5588 5589 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5590 "Should be!"); 5591 5592 // Peel off a constant offset: 5593 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5594 // In the future we could consider being smarter here and handle 5595 // {Start+Step,+,Step} too. 5596 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5597 return; 5598 5599 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5600 S = SA->getOperand(1); 5601 } 5602 5603 // Peel off a cast operation 5604 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5605 CastOp = SCast->getSCEVType(); 5606 S = SCast->getOperand(); 5607 } 5608 5609 using namespace llvm::PatternMatch; 5610 5611 auto *SU = dyn_cast<SCEVUnknown>(S); 5612 const APInt *TrueVal, *FalseVal; 5613 if (!SU || 5614 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5615 m_APInt(FalseVal)))) { 5616 Condition = nullptr; 5617 return; 5618 } 5619 5620 TrueValue = *TrueVal; 5621 FalseValue = *FalseVal; 5622 5623 // Re-apply the cast we peeled off earlier 5624 if (CastOp.hasValue()) 5625 switch (*CastOp) { 5626 default: 5627 llvm_unreachable("Unknown SCEV cast type!"); 5628 5629 case scTruncate: 5630 TrueValue = TrueValue.trunc(BitWidth); 5631 FalseValue = FalseValue.trunc(BitWidth); 5632 break; 5633 case scZeroExtend: 5634 TrueValue = TrueValue.zext(BitWidth); 5635 FalseValue = FalseValue.zext(BitWidth); 5636 break; 5637 case scSignExtend: 5638 TrueValue = TrueValue.sext(BitWidth); 5639 FalseValue = FalseValue.sext(BitWidth); 5640 break; 5641 } 5642 5643 // Re-apply the constant offset we peeled off earlier 5644 TrueValue += Offset; 5645 FalseValue += Offset; 5646 } 5647 5648 bool isRecognized() { return Condition != nullptr; } 5649 }; 5650 5651 SelectPattern StartPattern(*this, BitWidth, Start); 5652 if (!StartPattern.isRecognized()) 5653 return ConstantRange(BitWidth, /* isFullSet = */ true); 5654 5655 SelectPattern StepPattern(*this, BitWidth, Step); 5656 if (!StepPattern.isRecognized()) 5657 return ConstantRange(BitWidth, /* isFullSet = */ true); 5658 5659 if (StartPattern.Condition != StepPattern.Condition) { 5660 // 
We don't handle this case today; but we could, by considering four 5661 // possibilities below instead of two. I'm not sure if there are cases where 5662 // that will help over what getRange already does, though. 5663 return ConstantRange(BitWidth, /* isFullSet = */ true); 5664 } 5665 5666 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5667 // construct arbitrary general SCEV expressions here. This function is called 5668 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5669 // say) can end up caching a suboptimal value. 5670 5671 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5672 // C2352 and C2512 (otherwise it isn't needed). 5673 5674 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5675 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5676 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5677 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5678 5679 ConstantRange TrueRange = 5680 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5681 ConstantRange FalseRange = 5682 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5683 5684 return TrueRange.unionWith(FalseRange); 5685 } 5686 5687 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5688 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5689 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5690 5691 // Return early if there are no flags to propagate to the SCEV. 5692 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5693 if (BinOp->hasNoUnsignedWrap()) 5694 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5695 if (BinOp->hasNoSignedWrap()) 5696 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5697 if (Flags == SCEV::FlagAnyWrap) 5698 return SCEV::FlagAnyWrap; 5699 5700 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5701 } 5702 5703 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5704 // Here we check that I is in the header of the innermost loop containing I, 5705 // since we only deal with instructions in the loop header. The actual loop we 5706 // need to check later will come from an add recurrence, but getting that 5707 // requires computing the SCEV of the operands, which can be expensive. This 5708 // check we can do cheaply to rule out some cases early. 5709 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5710 if (InnermostContainingLoop == nullptr || 5711 InnermostContainingLoop->getHeader() != I->getParent()) 5712 return false; 5713 5714 // Only proceed if we can prove that I does not yield poison. 5715 if (!programUndefinedIfFullPoison(I)) 5716 return false; 5717 5718 // At this point we know that if I is executed, then it does not wrap 5719 // according to at least one of NSW or NUW. If I is not executed, then we do 5720 // not know if the calculation that I represents would wrap. Multiple 5721 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5722 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5723 // derived from other instructions that map to the same SCEV. We cannot make 5724 // that guarantee for cases where I is not executed. So we need to find the 5725 // loop that I is considered in relation to and prove that I is executed for 5726 // every iteration of that loop. 
That implies that the value that I
5727 // calculates does not wrap anywhere in the loop, so then we can apply the
5728 // flags to the SCEV.
5729 //
5730 // We check isLoopInvariant to disambiguate in case we are adding recurrences
5731 // from different loops, so that we know which loop to prove that I is
5732 // executed in.
5733 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
5734 // I could be an extractvalue from a call to an overflow intrinsic.
5735 // TODO: We can do better here in some cases.
5736 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5737 return false;
5738 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5739 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5740 bool AllOtherOpsLoopInvariant = true;
5741 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5742 ++OtherOpIndex) {
5743 if (OtherOpIndex != OpIndex) {
5744 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5745 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5746 AllOtherOpsLoopInvariant = false;
5747 break;
5748 }
5749 }
5750 }
5751 if (AllOtherOpsLoopInvariant &&
5752 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5753 return true;
5754 }
5755 }
5756 return false;
5757 }
5758
5759 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5760 // If we know that \c I can never be poison, period, then that's enough.
5761 if (isSCEVExprNeverPoison(I))
5762 return true;
5763
5764 // For an add recurrence specifically, we assume that infinite loops without
5765 // side effects are undefined behavior, and then reason as follows:
5766 //
5767 // If the add recurrence is poison in any iteration, it is poison on all
5768 // future iterations (since incrementing poison yields poison). If the result
5769 // of the add recurrence is fed into the loop latch condition and the loop
5770 // does not contain any throws or exiting blocks other than the latch, we now
5771 // have the ability to "choose" whether the backedge is taken or not (by
5772 // choosing a sufficiently evil value for the poison feeding into the branch)
5773 // for every iteration including and after the one in which \p I first became
5774 // poison. There are two possibilities (call the iteration in which \p I
5775 // first becomes poison K):
5776 //
5777 // 1. In the set of iterations including and after K, the loop body executes
5778 // no side effects. In this case executing the backedge an infinite number
5779 // of times will yield undefined behavior.
5780 //
5781 // 2. In the set of iterations including and after K, the loop body executes
5782 // at least one side effect. In this case, that specific instance of side
5783 // effect is control dependent on poison, which also yields undefined
5784 // behavior.
5785
5786 auto *ExitingBB = L->getExitingBlock();
5787 auto *LatchBB = L->getLoopLatch();
5788 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5789 return false;
5790
5791 SmallPtrSet<const Instruction *, 16> Pushed;
5792 SmallVector<const Instruction *, 8> PoisonStack;
5793
5794 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5795 // things that are known to be fully poison under that assumption go on the
5796 // PoisonStack.
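// Sketch of the propagation (the names here are illustrative): if I is
// %iv.next whose only user is %cmp = icmp eq i64 %iv.next, %n, and %cmp
// feeds the latch branch, then %cmp is marked fully poison below and the
// latch is found to be control dependent on poison.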
5797 Pushed.insert(I); 5798 PoisonStack.push_back(I); 5799 5800 bool LatchControlDependentOnPoison = false; 5801 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5802 const Instruction *Poison = PoisonStack.pop_back_val(); 5803 5804 for (auto *PoisonUser : Poison->users()) { 5805 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5806 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5807 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5808 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5809 assert(BI->isConditional() && "Only possibility!"); 5810 if (BI->getParent() == LatchBB) { 5811 LatchControlDependentOnPoison = true; 5812 break; 5813 } 5814 } 5815 } 5816 } 5817 5818 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5819 } 5820 5821 ScalarEvolution::LoopProperties 5822 ScalarEvolution::getLoopProperties(const Loop *L) { 5823 using LoopProperties = ScalarEvolution::LoopProperties; 5824 5825 auto Itr = LoopPropertiesCache.find(L); 5826 if (Itr == LoopPropertiesCache.end()) { 5827 auto HasSideEffects = [](Instruction *I) { 5828 if (auto *SI = dyn_cast<StoreInst>(I)) 5829 return !SI->isSimple(); 5830 5831 return I->mayHaveSideEffects(); 5832 }; 5833 5834 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5835 /*HasNoSideEffects*/ true}; 5836 5837 for (auto *BB : L->getBlocks()) 5838 for (auto &I : *BB) { 5839 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5840 LP.HasNoAbnormalExits = false; 5841 if (HasSideEffects(&I)) 5842 LP.HasNoSideEffects = false; 5843 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5844 break; // We're already as pessimistic as we can get. 5845 } 5846 5847 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5848 assert(InsertPair.second && "We just checked!"); 5849 Itr = InsertPair.first; 5850 } 5851 5852 return Itr->second; 5853 } 5854 5855 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5856 if (!isSCEVable(V->getType())) 5857 return getUnknown(V); 5858 5859 if (Instruction *I = dyn_cast<Instruction>(V)) { 5860 // Don't attempt to analyze instructions in blocks that aren't 5861 // reachable. Such instructions don't matter, and they aren't required 5862 // to obey basic rules for definitions dominating uses which this 5863 // analysis depends on. 5864 if (!DT.isReachableFromEntry(I->getParent())) 5865 return getUnknown(V); 5866 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5867 return getConstant(CI); 5868 else if (isa<ConstantPointerNull>(V)) 5869 return getZero(V->getType()); 5870 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5871 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5872 else if (!isa<ConstantExpr>(V)) 5873 return getUnknown(V); 5874 5875 Operator *U = cast<Operator>(V); 5876 if (auto BO = MatchBinaryOp(U, DT)) { 5877 switch (BO->Opcode) { 5878 case Instruction::Add: { 5879 // The simple thing to do would be to just call getSCEV on both operands 5880 // and call getAddExpr with the result. However if we're looking at a 5881 // bunch of things all added together, this can be quite inefficient, 5882 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5883 // Instead, gather up all the operands and make a single getAddExpr call. 5884 // LLVM IR canonical form means we need only traverse the left operands. 
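// For example (illustrative): for (((%a + %b) + %c) + %d) the loop below
// walks the left operands once, collecting {%d, %c, %b, %a} into AddOps,
// and then makes a single getAddExpr call instead of three nested ones.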
5885 SmallVector<const SCEV *, 4> AddOps; 5886 do { 5887 if (BO->Op) { 5888 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5889 AddOps.push_back(OpSCEV); 5890 break; 5891 } 5892 5893 // If a NUW or NSW flag can be applied to the SCEV for this 5894 // addition, then compute the SCEV for this addition by itself 5895 // with a separate call to getAddExpr. We need to do that 5896 // instead of pushing the operands of the addition onto AddOps, 5897 // since the flags are only known to apply to this particular 5898 // addition - they may not apply to other additions that can be 5899 // formed with operands from AddOps. 5900 const SCEV *RHS = getSCEV(BO->RHS); 5901 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5902 if (Flags != SCEV::FlagAnyWrap) { 5903 const SCEV *LHS = getSCEV(BO->LHS); 5904 if (BO->Opcode == Instruction::Sub) 5905 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5906 else 5907 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5908 break; 5909 } 5910 } 5911 5912 if (BO->Opcode == Instruction::Sub) 5913 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5914 else 5915 AddOps.push_back(getSCEV(BO->RHS)); 5916 5917 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5918 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5919 NewBO->Opcode != Instruction::Sub)) { 5920 AddOps.push_back(getSCEV(BO->LHS)); 5921 break; 5922 } 5923 BO = NewBO; 5924 } while (true); 5925 5926 return getAddExpr(AddOps); 5927 } 5928 5929 case Instruction::Mul: { 5930 SmallVector<const SCEV *, 4> MulOps; 5931 do { 5932 if (BO->Op) { 5933 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5934 MulOps.push_back(OpSCEV); 5935 break; 5936 } 5937 5938 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5939 if (Flags != SCEV::FlagAnyWrap) { 5940 MulOps.push_back( 5941 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5942 break; 5943 } 5944 } 5945 5946 MulOps.push_back(getSCEV(BO->RHS)); 5947 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5948 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5949 MulOps.push_back(getSCEV(BO->LHS)); 5950 break; 5951 } 5952 BO = NewBO; 5953 } while (true); 5954 5955 return getMulExpr(MulOps); 5956 } 5957 case Instruction::UDiv: 5958 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5959 case Instruction::URem: 5960 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5961 case Instruction::Sub: { 5962 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5963 if (BO->Op) 5964 Flags = getNoWrapFlagsFromUB(BO->Op); 5965 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5966 } 5967 case Instruction::And: 5968 // For an expression like x&255 that merely masks off the high bits, 5969 // use zext(trunc(x)) as the SCEV expression. 5970 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5971 if (CI->isZero()) 5972 return getSCEV(BO->RHS); 5973 if (CI->isMinusOne()) 5974 return getSCEV(BO->LHS); 5975 const APInt &A = CI->getValue(); 5976 5977 // Instcombine's ShrinkDemandedConstant may strip bits out of 5978 // constants, obscuring what would otherwise be a low-bits mask. 5979 // Use computeKnownBits to compute what ShrinkDemandedConstant 5980 // knew about to reconstruct a low-bits mask value. 
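// For example (an illustrative sketch): for %x & 0x0FF0 in i16, LZ == 4
// and TZ == 4; assuming the known bits confirm the mask, the code below
// rebuilds this as (zext i8 (trunc i16 (%x /u 16) to i8) to i16) * 16.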
5981 unsigned LZ = A.countLeadingZeros(); 5982 unsigned TZ = A.countTrailingZeros(); 5983 unsigned BitWidth = A.getBitWidth(); 5984 KnownBits Known(BitWidth); 5985 computeKnownBits(BO->LHS, Known, getDataLayout(), 5986 0, &AC, nullptr, &DT); 5987 5988 APInt EffectiveMask = 5989 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5990 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5991 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5992 const SCEV *LHS = getSCEV(BO->LHS); 5993 const SCEV *ShiftedLHS = nullptr; 5994 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5995 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5996 // For an expression like (x * 8) & 8, simplify the multiply. 5997 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5998 unsigned GCD = std::min(MulZeros, TZ); 5999 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6000 SmallVector<const SCEV*, 4> MulOps; 6001 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6002 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6003 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6004 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6005 } 6006 } 6007 if (!ShiftedLHS) 6008 ShiftedLHS = getUDivExpr(LHS, MulCount); 6009 return getMulExpr( 6010 getZeroExtendExpr( 6011 getTruncateExpr(ShiftedLHS, 6012 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6013 BO->LHS->getType()), 6014 MulCount); 6015 } 6016 } 6017 break; 6018 6019 case Instruction::Or: 6020 // If the RHS of the Or is a constant, we may have something like: 6021 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6022 // optimizations will transparently handle this case. 6023 // 6024 // In order for this transformation to be safe, the LHS must be of the 6025 // form X*(2^n) and the Or constant must be less than 2^n. 6026 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6027 const SCEV *LHS = getSCEV(BO->LHS); 6028 const APInt &CIVal = CI->getValue(); 6029 if (GetMinTrailingZeros(LHS) >= 6030 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6031 // Build a plain add SCEV. 6032 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6033 // If the LHS of the add was an addrec and it has no-wrap flags, 6034 // transfer the no-wrap flags, since an or won't introduce a wrap. 6035 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6036 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6037 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6038 OldAR->getNoWrapFlags()); 6039 } 6040 return S; 6041 } 6042 } 6043 break; 6044 6045 case Instruction::Xor: 6046 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6047 // If the RHS of xor is -1, then this is a not operation. 6048 if (CI->isMinusOne()) 6049 return getNotSCEV(getSCEV(BO->LHS)); 6050 6051 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6052 // This is a variant of the check for xor with -1, and it handles 6053 // the case where instcombine has trimmed non-demanded bits out 6054 // of an xor with -1. 
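// For example (illustrative): with C == 0x0F, %x & 0x0F is modeled as
// (zext i4 (trunc %x to i4) to iN), and xor'ing that with 0x0F flips
// exactly the low four bits, i.e. (zext i4 (trunc (not %x) to i4) to iN).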
6055 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6056 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6057 if (LBO->getOpcode() == Instruction::And && 6058 LCI->getValue() == CI->getValue()) 6059 if (const SCEVZeroExtendExpr *Z = 6060 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6061 Type *UTy = BO->LHS->getType(); 6062 const SCEV *Z0 = Z->getOperand(); 6063 Type *Z0Ty = Z0->getType(); 6064 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6065 6066 // If C is a low-bits mask, the zero extend is serving to 6067 // mask off the high bits. Complement the operand and 6068 // re-apply the zext. 6069 if (CI->getValue().isMask(Z0TySize)) 6070 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6071 6072 // If C is a single bit, it may be in the sign-bit position 6073 // before the zero-extend. In this case, represent the xor 6074 // using an add, which is equivalent, and re-apply the zext. 6075 APInt Trunc = CI->getValue().trunc(Z0TySize); 6076 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6077 Trunc.isSignMask()) 6078 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6079 UTy); 6080 } 6081 } 6082 break; 6083 6084 case Instruction::Shl: 6085 // Turn shift left of a constant amount into a multiply. 6086 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6087 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6088 6089 // If the shift count is not less than the bitwidth, the result of 6090 // the shift is undefined. Don't try to analyze it, because the 6091 // resolution chosen here may differ from the resolution chosen in 6092 // other parts of the compiler. 6093 if (SA->getValue().uge(BitWidth)) 6094 break; 6095 6096 // It is currently not resolved how to interpret NSW for left 6097 // shift by BitWidth - 1, so we avoid applying flags in that 6098 // case. Remove this check (or this comment) once the situation 6099 // is resolved. See 6100 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 6101 // and http://reviews.llvm.org/D8890 . 6102 auto Flags = SCEV::FlagAnyWrap; 6103 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 6104 Flags = getNoWrapFlagsFromUB(BO->Op); 6105 6106 Constant *X = ConstantInt::get(getContext(), 6107 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6108 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6109 } 6110 break; 6111 6112 case Instruction::AShr: { 6113 // AShr X, C, where C is a constant. 6114 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6115 if (!CI) 6116 break; 6117 6118 Type *OuterTy = BO->LHS->getType(); 6119 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6120 // If the shift count is not less than the bitwidth, the result of 6121 // the shift is undefined. Don't try to analyze it, because the 6122 // resolution chosen here may differ from the resolution chosen in 6123 // other parts of the compiler. 6124 if (CI->getValue().uge(BitWidth)) 6125 break; 6126 6127 if (CI->isZero()) 6128 return getSCEV(BO->LHS); // shift by zero --> noop 6129 6130 uint64_t AShrAmt = CI->getZExtValue(); 6131 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6132 6133 Operator *L = dyn_cast<Operator>(BO->LHS); 6134 if (L && L->getOpcode() == Instruction::Shl) { 6135 // X = Shl A, n 6136 // Y = AShr X, m 6137 // Both n and m are constant. 6138 6139 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6140 if (L->getOperand(1) == BO->RHS) 6141 // For a two-shift sext-inreg, i.e. n = m, 6142 // use sext(trunc(x)) as the SCEV expression. 
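        // For example (illustrative only): on i32, "ashr (shl %x, 24), 24"
        // sign-extends the low byte of %x, so the SCEV built below is
        // sext(trunc %x to i8) to i32.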
6143         return getSignExtendExpr(
6144             getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6145
6146       ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6147       if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6148         uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6149         if (ShlAmt > AShrAmt) {
6150           // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6151           // expression. We already checked that ShlAmt < BitWidth, so
6152           // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
6153           // ShlAmt - AShrAmt < BitWidth - AShrAmt.
6154           APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6155                                           ShlAmt - AShrAmt);
6156           return getSignExtendExpr(
6157               getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6158               getConstant(Mul)), OuterTy);
6159         }
6160       }
6161     }
6162     break;
6163   }
6164   }
6165   }
6166
6167   switch (U->getOpcode()) {
6168   case Instruction::Trunc:
6169     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6170
6171   case Instruction::ZExt:
6172     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6173
6174   case Instruction::SExt:
6175     if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6176       // The NSW flag of a subtract does not always survive the conversion to
6177       // A + (-1)*B. By pushing sign extension onto its operands we are much
6178       // more likely to preserve NSW and allow later AddRec optimisations.
6179       //
6180       // NOTE: This is effectively duplicating this logic from getSignExtend:
6181       // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6182       // but by that point the NSW information has potentially been lost.
6183       if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6184         Type *Ty = U->getType();
6185         auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6186         auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6187         return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6188       }
6189     }
6190     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6191
6192   case Instruction::BitCast:
6193     // BitCasts are no-op casts so we just eliminate the cast.
6194     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6195       return getSCEV(U->getOperand(0));
6196     break;
6197
6198   // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
6199   // lead to pointer expressions which cannot safely be expanded to GEPs,
6200   // because ScalarEvolution doesn't respect the GEP aliasing rules when
6201   // simplifying integer expressions.
6202
6203   case Instruction::GetElementPtr:
6204     return createNodeForGEP(cast<GEPOperator>(U));
6205
6206   case Instruction::PHI:
6207     return createNodeForPHI(cast<PHINode>(U));
6208
6209   case Instruction::Select:
6210     // U can also be a select constant expr, which we let fall through. Since
6211     // createNodeForSelectOrPHI only works for a condition that is an
6212     // `ICmpInst`, and constant expressions cannot have instructions as
6213     // operands, we'd have returned getUnknown for a select constant
6213     // expression anyway.
6214     if (isa<Instruction>(U))
6215       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6216                                       U->getOperand(1), U->getOperand(2));
6217     break;
6218
6219   case Instruction::Call:
6220   case Instruction::Invoke:
6221     if (Value *RV = CallSite(U).getReturnedArgOperand())
6222       return getSCEV(RV);
6223     break;
6224   }
6225
6226   return getUnknown(V);
6227 }
6228
6229 //===----------------------------------------------------------------------===//
6230 //                   Iteration Count Computation Code
6231 //
6232
6233 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6234   if (!ExitCount)
6235     return 0;
6236
6237   ConstantInt *ExitConst = ExitCount->getValue();
6238
6239   // Guard against huge trip counts.
6240   if (ExitConst->getValue().getActiveBits() > 32)
6241     return 0;
6242
6243   // In case of integer overflow, this returns 0, which is correct.
6244   return ((unsigned)ExitConst->getZExtValue()) + 1;
6245 }
6246
6247 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6248   if (BasicBlock *ExitingBB = L->getExitingBlock())
6249     return getSmallConstantTripCount(L, ExitingBB);
6250
6251   // No trip count information for multiple exits.
6252   return 0;
6253 }
6254
6255 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6256                                                     BasicBlock *ExitingBlock) {
6257   assert(ExitingBlock && "Must pass a non-null exiting block!");
6258   assert(L->isLoopExiting(ExitingBlock) &&
6259          "Exiting block must actually branch out of the loop!");
6260   const SCEVConstant *ExitCount =
6261       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6262   return getConstantTripCount(ExitCount);
6263 }
6264
6265 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6266   const auto *MaxExitCount =
6267       dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
6268   return getConstantTripCount(MaxExitCount);
6269 }
6270
6271 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6272   if (BasicBlock *ExitingBB = L->getExitingBlock())
6273     return getSmallConstantTripMultiple(L, ExitingBB);
6274
6275   // No trip multiple information for multiple exits.
6276   return 0;
6277 }
6278
6279 /// Returns the largest constant divisor of the trip count of this loop as a
6280 /// normal unsigned value, if possible. This means that the actual trip count is
6281 /// always a multiple of the returned value (don't forget the trip count could
6282 /// very well be zero as well!).
6283 ///
6284 /// Returns 1 if the trip count is unknown or not guaranteed to be a
6285 /// multiple of a constant (which is also the case if the trip count is simply
6286 /// a constant; use getSmallConstantTripCount for that case). It will also
6287 /// return 1 if the trip count is very large (>= 2^32).
6288 ///
6289 /// As explained in the comments for getSmallConstantTripCount, this assumes
6290 /// that control exits the loop via ExitingBlock.
6291 unsigned
6292 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6293                                               BasicBlock *ExitingBlock) {
6294   assert(ExitingBlock && "Must pass a non-null exiting block!");
6295   assert(L->isLoopExiting(ExitingBlock) &&
6296          "Exiting block must actually branch out of the loop!");
6297   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6298   if (ExitCount == getCouldNotCompute())
6299     return 1;
6300
6301   // Get the trip count from the BE count by adding 1.
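  // For example (illustrative only): if the backedge-taken count is the
  // non-constant 2*n + 1, the trip count expression 2*n + 2 has at least one
  // trailing zero bit, so GetMinTrailingZeros lets the code below report a
  // trip multiple of 2 even though no constant trip count exists.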
6302 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6303 6304 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6305 if (!TC) 6306 // Attempt to factor more general cases. Returns the greatest power of 6307 // two divisor. If overflow happens, the trip count expression is still 6308 // divisible by the greatest power of 2 divisor returned. 6309 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6310 6311 ConstantInt *Result = TC->getValue(); 6312 6313 // Guard against huge trip counts (this requires checking 6314 // for zero to handle the case where the trip count == -1 and the 6315 // addition wraps). 6316 if (!Result || Result->getValue().getActiveBits() > 32 || 6317 Result->getValue().getActiveBits() == 0) 6318 return 1; 6319 6320 return (unsigned)Result->getZExtValue(); 6321 } 6322 6323 /// Get the expression for the number of loop iterations for which this loop is 6324 /// guaranteed not to exit via ExitingBlock. Otherwise return 6325 /// SCEVCouldNotCompute. 6326 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6327 BasicBlock *ExitingBlock) { 6328 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6329 } 6330 6331 const SCEV * 6332 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6333 SCEVUnionPredicate &Preds) { 6334 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 6335 } 6336 6337 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6338 return getBackedgeTakenInfo(L).getExact(this); 6339 } 6340 6341 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6342 /// known never to be less than the actual backedge taken count. 6343 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6344 return getBackedgeTakenInfo(L).getMax(this); 6345 } 6346 6347 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6348 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6349 } 6350 6351 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6352 static void 6353 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6354 BasicBlock *Header = L->getHeader(); 6355 6356 // Push all Loop-header PHIs onto the Worklist stack. 6357 for (BasicBlock::iterator I = Header->begin(); 6358 PHINode *PN = dyn_cast<PHINode>(I); ++I) 6359 Worklist.push_back(PN); 6360 } 6361 6362 const ScalarEvolution::BackedgeTakenInfo & 6363 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6364 auto &BTI = getBackedgeTakenInfo(L); 6365 if (BTI.hasFullInfo()) 6366 return BTI; 6367 6368 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6369 6370 if (!Pair.second) 6371 return Pair.first->second; 6372 6373 BackedgeTakenInfo Result = 6374 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6375 6376 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6377 } 6378 6379 const ScalarEvolution::BackedgeTakenInfo & 6380 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6381 // Initially insert an invalid entry for this loop. If the insertion 6382 // succeeds, proceed to actually compute a backedge-taken count and 6383 // update the value. The temporary CouldNotCompute value tells SCEV 6384 // code elsewhere that it shouldn't attempt to request a new 6385 // backedge-taken count, which could result in infinite recursion. 
6386   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6387       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6388   if (!Pair.second)
6389     return Pair.first->second;
6390
6391   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6392   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6393   // must be cleared in this scope.
6394   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6395
6396   if (Result.getExact(this) != getCouldNotCompute()) {
6397     assert(isLoopInvariant(Result.getExact(this), L) &&
6398            isLoopInvariant(Result.getMax(this), L) &&
6399            "Computed backedge-taken count isn't loop invariant for loop!");
6400     ++NumTripCountsComputed;
6401   }
6402   else if (Result.getMax(this) == getCouldNotCompute() &&
6403            isa<PHINode>(L->getHeader()->begin())) {
6404     // Only count loops that have phi nodes as not being computable.
6405     ++NumTripCountsNotComputed;
6406   }
6407
6408   // Now that we know more about the trip count for this loop, forget any
6409   // existing SCEV values for PHI nodes in this loop since they are only
6410   // conservative estimates made without the benefit of trip count
6411   // information. This is similar to the code in forgetLoop, except that
6412   // it handles SCEVUnknown PHI nodes specially.
6413   if (Result.hasAnyInfo()) {
6414     SmallVector<Instruction *, 16> Worklist;
6415     PushLoopPHIs(L, Worklist);
6416
6417     SmallPtrSet<Instruction *, 8> Visited;
6418     while (!Worklist.empty()) {
6419       Instruction *I = Worklist.pop_back_val();
6420       if (!Visited.insert(I).second)
6421         continue;
6422
6423       ValueExprMapType::iterator It =
6424           ValueExprMap.find_as(static_cast<Value *>(I));
6425       if (It != ValueExprMap.end()) {
6426         const SCEV *Old = It->second;
6427
6428         // SCEVUnknown for a PHI either means that it has an unrecognized
6429         // structure, or it's a PHI that's in the process of being computed
6430         // by createNodeForPHI. In the former case, additional loop trip
6431         // count information isn't going to change anything. In the latter
6432         // case, createNodeForPHI will perform the necessary updates on its
6433         // own when it gets to that point.
6434         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6435           eraseValueFromMap(It->first);
6436           forgetMemoizedResults(Old);
6437         }
6438         if (PHINode *PN = dyn_cast<PHINode>(I))
6439           ConstantEvolutionLoopExitValue.erase(PN);
6440       }
6441
6442       // Since we don't need to invalidate anything for correctness and we're
6443       // only invalidating to make SCEV's results more precise, we get to stop
6444       // early to avoid invalidating too much. This is especially important in
6445       // cases like:
6446       //
6447       //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6448       //   loop0:
6449       //     %pn0 = phi
6450       //     ...
6451       //   loop1:
6452       //     %pn1 = phi
6453       //     ...
6454       //
6455       // where both loop0's and loop1's backedge-taken counts use the SCEV
6456       // expression for %v. If we don't have the early stop below then in cases
6457       // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6458       // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6459       // count for loop1, effectively nullifying SCEV's trip count cache.
6460       for (auto *U : I->users())
6461         if (auto *I = dyn_cast<Instruction>(U)) {
6462           auto *LoopForUser = LI.getLoopFor(I->getParent());
6463           if (LoopForUser && L->contains(LoopForUser))
6464             Worklist.push_back(I);
6465         }
6466     }
6467   }
6468
6469   // Re-lookup the insert position, since the call to
6470   // computeBackedgeTakenCount above could result in a
6471   // recursive call to getBackedgeTakenInfo (on a different
6472   // loop), which would invalidate the iterator computed
6473   // earlier.
6474   return BackedgeTakenCounts.find(L)->second = std::move(Result);
6475 }
6476
6477 void ScalarEvolution::forgetLoop(const Loop *L) {
6478   // Drop any stored trip count value.
6479   auto RemoveLoopFromBackedgeMap =
6480       [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
6481         auto BTCPos = Map.find(L);
6482         if (BTCPos != Map.end()) {
6483           BTCPos->second.clear();
6484           Map.erase(BTCPos);
6485         }
6486       };
6487
6488   SmallVector<const Loop *, 16> LoopWorklist(1, L);
6489   SmallVector<Instruction *, 32> Worklist;
6490   SmallPtrSet<Instruction *, 16> Visited;
6491
6492   // Iterate over all the loops and sub-loops to drop SCEV information.
6493   while (!LoopWorklist.empty()) {
6494     auto *CurrL = LoopWorklist.pop_back_val();
6495
6496     RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
6497     RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);
6498
6499     // Drop information about predicated SCEV rewrites for this loop.
6500     for (auto I = PredicatedSCEVRewrites.begin();
6501          I != PredicatedSCEVRewrites.end();) {
6502       std::pair<const SCEV *, const Loop *> Entry = I->first;
6503       if (Entry.second == CurrL)
6504         PredicatedSCEVRewrites.erase(I++);
6505       else
6506         ++I;
6507     }
6508
6509     auto LoopUsersItr = LoopUsers.find(CurrL);
6510     if (LoopUsersItr != LoopUsers.end()) {
6511       for (auto *S : LoopUsersItr->second)
6512         forgetMemoizedResults(S);
6513       LoopUsers.erase(LoopUsersItr);
6514     }
6515
6516     // Drop information about expressions based on loop-header PHIs.
6517     PushLoopPHIs(CurrL, Worklist);
6518
6519     while (!Worklist.empty()) {
6520       Instruction *I = Worklist.pop_back_val();
6521       if (!Visited.insert(I).second)
6522         continue;
6523
6524       ValueExprMapType::iterator It =
6525           ValueExprMap.find_as(static_cast<Value *>(I));
6526       if (It != ValueExprMap.end()) {
6527         eraseValueFromMap(It->first);
6528         forgetMemoizedResults(It->second);
6529         if (PHINode *PN = dyn_cast<PHINode>(I))
6530           ConstantEvolutionLoopExitValue.erase(PN);
6531       }
6532
6533       PushDefUseChildren(I, Worklist);
6534     }
6535
6536     LoopPropertiesCache.erase(CurrL);
6537     // Forget all contained loops too, to avoid dangling entries in the
6538     // ValuesAtScopes map.
6539     LoopWorklist.append(CurrL->begin(), CurrL->end());
6540   }
6541 }
6542
6543 void ScalarEvolution::forgetValue(Value *V) {
6544   Instruction *I = dyn_cast<Instruction>(V);
6545   if (!I) return;
6546
6547   // Drop information about expressions based on loop-header PHIs.
6548   SmallVector<Instruction *, 16> Worklist;
6549   Worklist.push_back(I);
6550
6551   SmallPtrSet<Instruction *, 8> Visited;
6552   while (!Worklist.empty()) {
6553     I = Worklist.pop_back_val();
6554     if (!Visited.insert(I).second)
6555       continue;
6556
6557     ValueExprMapType::iterator It =
6558         ValueExprMap.find_as(static_cast<Value *>(I));
6559     if (It != ValueExprMap.end()) {
6560       eraseValueFromMap(It->first);
6561       forgetMemoizedResults(It->second);
6562       if (PHINode *PN = dyn_cast<PHINode>(I))
6563         ConstantEvolutionLoopExitValue.erase(PN);
6564     }
6565
6566     PushDefUseChildren(I, Worklist);
6567   }
6568 }
6569
6570 /// Get the exact loop backedge taken count considering all loop exits. A
6571 /// computable result can only be returned for loops with a single exit.
6572 /// Returning the minimum taken count among all exits is incorrect because one
6573 /// of the loop's exit limits may have been skipped. howFarToZero assumes that
6574 /// the limit of each loop test is never skipped. This is a valid assumption as
6575 /// long as the loop exits via that test. For precise results, it is the
6576 /// caller's responsibility to specify the relevant loop exit using
6577 /// getExact(ExitingBlock, SE).
6578 const SCEV *
6579 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
6580                                              SCEVUnionPredicate *Preds) const {
6581   // If any exits were not computable, the loop is not computable.
6582   if (!isComplete() || ExitNotTaken.empty())
6583     return SE->getCouldNotCompute();
6584
6585   const SCEV *BECount = nullptr;
6586   for (auto &ENT : ExitNotTaken) {
6587     assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
6588
6589     if (!BECount)
6590       BECount = ENT.ExactNotTaken;
6591     else if (BECount != ENT.ExactNotTaken)
6592       return SE->getCouldNotCompute();
6593     if (Preds && !ENT.hasAlwaysTruePredicate())
6594       Preds->add(ENT.Predicate.get());
6595
6596     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
6597            "Predicate should be always true!");
6598   }
6599
6600   assert(BECount && "Invalid not taken count for loop exit");
6601   return BECount;
6602 }
6603
6604 /// Get the exact not taken count for this loop exit.
6605 const SCEV *
6606 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
6607                                              ScalarEvolution *SE) const {
6608   for (auto &ENT : ExitNotTaken)
6609     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
6610       return ENT.ExactNotTaken;
6611
6612   return SE->getCouldNotCompute();
6613 }
6614
6615 /// getMax - Get the max backedge taken count for the loop.
6616 const SCEV * 6617 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6618 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6619 return !ENT.hasAlwaysTruePredicate(); 6620 }; 6621 6622 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6623 return SE->getCouldNotCompute(); 6624 6625 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6626 "No point in having a non-constant max backedge taken count!"); 6627 return getMax(); 6628 } 6629 6630 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6631 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6632 return !ENT.hasAlwaysTruePredicate(); 6633 }; 6634 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6635 } 6636 6637 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6638 ScalarEvolution *SE) const { 6639 if (getMax() && getMax() != SE->getCouldNotCompute() && 6640 SE->hasOperand(getMax(), S)) 6641 return true; 6642 6643 for (auto &ENT : ExitNotTaken) 6644 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6645 SE->hasOperand(ENT.ExactNotTaken, S)) 6646 return true; 6647 6648 return false; 6649 } 6650 6651 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6652 : ExactNotTaken(E), MaxNotTaken(E) { 6653 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6654 isa<SCEVConstant>(MaxNotTaken)) && 6655 "No point in having a non-constant max backedge taken count!"); 6656 } 6657 6658 ScalarEvolution::ExitLimit::ExitLimit( 6659 const SCEV *E, const SCEV *M, bool MaxOrZero, 6660 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6661 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6662 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6663 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6664 "Exact is not allowed to be less precise than Max"); 6665 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6666 isa<SCEVConstant>(MaxNotTaken)) && 6667 "No point in having a non-constant max backedge taken count!"); 6668 for (auto *PredSet : PredSetList) 6669 for (auto *P : *PredSet) 6670 addPredicate(P); 6671 } 6672 6673 ScalarEvolution::ExitLimit::ExitLimit( 6674 const SCEV *E, const SCEV *M, bool MaxOrZero, 6675 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6676 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6677 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6678 isa<SCEVConstant>(MaxNotTaken)) && 6679 "No point in having a non-constant max backedge taken count!"); 6680 } 6681 6682 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6683 bool MaxOrZero) 6684 : ExitLimit(E, M, MaxOrZero, None) { 6685 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6686 isa<SCEVConstant>(MaxNotTaken)) && 6687 "No point in having a non-constant max backedge taken count!"); 6688 } 6689 6690 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6691 /// computable exit into a persistent ExitNotTakenInfo array. 
6692 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6693 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6694 &&ExitCounts, 6695 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6696 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6697 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6698 6699 ExitNotTaken.reserve(ExitCounts.size()); 6700 std::transform( 6701 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6702 [&](const EdgeExitInfo &EEI) { 6703 BasicBlock *ExitBB = EEI.first; 6704 const ExitLimit &EL = EEI.second; 6705 if (EL.Predicates.empty()) 6706 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6707 6708 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6709 for (auto *Pred : EL.Predicates) 6710 Predicate->add(Pred); 6711 6712 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6713 }); 6714 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6715 "No point in having a non-constant max backedge taken count!"); 6716 } 6717 6718 /// Invalidate this result and free the ExitNotTakenInfo array. 6719 void ScalarEvolution::BackedgeTakenInfo::clear() { 6720 ExitNotTaken.clear(); 6721 } 6722 6723 /// Compute the number of times the backedge of the specified loop will execute. 6724 ScalarEvolution::BackedgeTakenInfo 6725 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6726 bool AllowPredicates) { 6727 SmallVector<BasicBlock *, 8> ExitingBlocks; 6728 L->getExitingBlocks(ExitingBlocks); 6729 6730 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6731 6732 SmallVector<EdgeExitInfo, 4> ExitCounts; 6733 bool CouldComputeBECount = true; 6734 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6735 const SCEV *MustExitMaxBECount = nullptr; 6736 const SCEV *MayExitMaxBECount = nullptr; 6737 bool MustExitMaxOrZero = false; 6738 6739 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6740 // and compute maxBECount. 6741 // Do a union of all the predicates here. 6742 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6743 BasicBlock *ExitBB = ExitingBlocks[i]; 6744 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6745 6746 assert((AllowPredicates || EL.Predicates.empty()) && 6747 "Predicated exit limit when predicates are not allowed!"); 6748 6749 // 1. For each exit that can be computed, add an entry to ExitCounts. 6750 // CouldComputeBECount is true only if all exits can be computed. 6751 if (EL.ExactNotTaken == getCouldNotCompute()) 6752 // We couldn't compute an exact value for this exit, so 6753 // we won't be able to compute an exact value for the loop. 6754 CouldComputeBECount = false; 6755 else 6756 ExitCounts.emplace_back(ExitBB, EL); 6757 6758 // 2. Derive the loop's MaxBECount from each exit's max number of 6759 // non-exiting iterations. Partition the loop exits into two kinds: 6760 // LoopMustExits and LoopMayExits. 6761 // 6762 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 6763 // is a LoopMayExit. If any computable LoopMustExit is found, then 6764 // MaxBECount is the minimum EL.MaxNotTaken of computable 6765 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6766 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6767 // computable EL.MaxNotTaken. 
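    // For example (illustrative only): if one exit dominates the latch with
    // EL.MaxNotTaken = 100 and another exit's limit is CouldNotCompute, the
    // loop-level MaxBECount is still 100, because every iteration that takes
    // the backedge must first pass the dominating (must-exit) test.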
6768 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6769 DT.dominates(ExitBB, Latch)) { 6770 if (!MustExitMaxBECount) { 6771 MustExitMaxBECount = EL.MaxNotTaken; 6772 MustExitMaxOrZero = EL.MaxOrZero; 6773 } else { 6774 MustExitMaxBECount = 6775 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6776 } 6777 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6778 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6779 MayExitMaxBECount = EL.MaxNotTaken; 6780 else { 6781 MayExitMaxBECount = 6782 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6783 } 6784 } 6785 } 6786 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6787 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6788 // The loop backedge will be taken the maximum or zero times if there's 6789 // a single exit that must be taken the maximum or zero times. 6790 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6791 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6792 MaxBECount, MaxOrZero); 6793 } 6794 6795 ScalarEvolution::ExitLimit 6796 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6797 bool AllowPredicates) { 6798 // Okay, we've chosen an exiting block. See what condition causes us to exit 6799 // at this block and remember the exit block and whether all other targets 6800 // lead to the loop header. 6801 bool MustExecuteLoopHeader = true; 6802 BasicBlock *Exit = nullptr; 6803 for (auto *SBB : successors(ExitingBlock)) 6804 if (!L->contains(SBB)) { 6805 if (Exit) // Multiple exit successors. 6806 return getCouldNotCompute(); 6807 Exit = SBB; 6808 } else if (SBB != L->getHeader()) { 6809 MustExecuteLoopHeader = false; 6810 } 6811 6812 // At this point, we know we have a conditional branch that determines whether 6813 // the loop is exited. However, we don't know if the branch is executed each 6814 // time through the loop. If not, then the execution count of the branch will 6815 // not be equal to the trip count of the loop. 6816 // 6817 // Currently we check for this by checking to see if the Exit branch goes to 6818 // the loop header. If so, we know it will always execute the same number of 6819 // times as the loop. We also handle the case where the exit block *is* the 6820 // loop header. This is common for un-rotated loops. 6821 // 6822 // If both of those tests fail, walk up the unique predecessor chain to the 6823 // header, stopping if there is an edge that doesn't exit the loop. If the 6824 // header is reached, the execution count of the branch will be equal to the 6825 // trip count of the loop. 6826 // 6827 // More extensive analysis could be done to handle more cases here. 6828 // 6829 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6830 // The simple checks failed, try climbing the unique predecessor chain 6831 // up to the header. 6832 bool Ok = false; 6833 for (BasicBlock *BB = ExitingBlock; BB; ) { 6834 BasicBlock *Pred = BB->getUniquePredecessor(); 6835 if (!Pred) 6836 return getCouldNotCompute(); 6837 TerminatorInst *PredTerm = Pred->getTerminator(); 6838 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6839 if (PredSucc == BB) 6840 continue; 6841 // If the predecessor has a successor that isn't BB and isn't 6842 // outside the loop, assume the worst. 
6843 if (L->contains(PredSucc)) 6844 return getCouldNotCompute(); 6845 } 6846 if (Pred == L->getHeader()) { 6847 Ok = true; 6848 break; 6849 } 6850 BB = Pred; 6851 } 6852 if (!Ok) 6853 return getCouldNotCompute(); 6854 } 6855 6856 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6857 TerminatorInst *Term = ExitingBlock->getTerminator(); 6858 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6859 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6860 // Proceed to the next level to examine the exit condition expression. 6861 return computeExitLimitFromCond( 6862 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6863 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6864 } 6865 6866 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6867 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6868 /*ControlsExit=*/IsOnlyExit); 6869 6870 return getCouldNotCompute(); 6871 } 6872 6873 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6874 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6875 bool ControlsExit, bool AllowPredicates) { 6876 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6877 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6878 ControlsExit, AllowPredicates); 6879 } 6880 6881 Optional<ScalarEvolution::ExitLimit> 6882 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6883 BasicBlock *TBB, BasicBlock *FBB, 6884 bool ControlsExit, bool AllowPredicates) { 6885 (void)this->L; 6886 (void)this->TBB; 6887 (void)this->FBB; 6888 (void)this->AllowPredicates; 6889 6890 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6891 this->AllowPredicates == AllowPredicates && 6892 "Variance in assumed invariant key components!"); 6893 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6894 if (Itr == TripCountMap.end()) 6895 return None; 6896 return Itr->second; 6897 } 6898 6899 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6900 BasicBlock *TBB, BasicBlock *FBB, 6901 bool ControlsExit, 6902 bool AllowPredicates, 6903 const ExitLimit &EL) { 6904 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6905 this->AllowPredicates == AllowPredicates && 6906 "Variance in assumed invariant key components!"); 6907 6908 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6909 assert(InsertResult.second && "Expected successful insertion!"); 6910 (void)InsertResult; 6911 } 6912 6913 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6914 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6915 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6916 6917 if (auto MaybeEL = 6918 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6919 return *MaybeEL; 6920 6921 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6922 ControlsExit, AllowPredicates); 6923 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6924 return EL; 6925 } 6926 6927 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6928 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6929 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6930 // Check if the controlling expression for this loop is an And or Or. 6931 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6932 if (BO->getOpcode() == Instruction::And) { 6933 // Recurse on the operands of the and. 
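      // For example (illustrative only): in a loop guarded by
      // "br (and %i.ne.n, %p.nonnull), %body, %exit", the loop keeps running
      // only while both subconditions hold, so the exact backedge-taken count
      // computed below is the umin of the two subconditions' limits (when
      // both are computable).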
6934 bool EitherMayExit = L->contains(TBB); 6935 ExitLimit EL0 = computeExitLimitFromCondCached( 6936 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6937 AllowPredicates); 6938 ExitLimit EL1 = computeExitLimitFromCondCached( 6939 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6940 AllowPredicates); 6941 const SCEV *BECount = getCouldNotCompute(); 6942 const SCEV *MaxBECount = getCouldNotCompute(); 6943 if (EitherMayExit) { 6944 // Both conditions must be true for the loop to continue executing. 6945 // Choose the less conservative count. 6946 if (EL0.ExactNotTaken == getCouldNotCompute() || 6947 EL1.ExactNotTaken == getCouldNotCompute()) 6948 BECount = getCouldNotCompute(); 6949 else 6950 BECount = 6951 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6952 if (EL0.MaxNotTaken == getCouldNotCompute()) 6953 MaxBECount = EL1.MaxNotTaken; 6954 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6955 MaxBECount = EL0.MaxNotTaken; 6956 else 6957 MaxBECount = 6958 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6959 } else { 6960 // Both conditions must be true at the same time for the loop to exit. 6961 // For now, be conservative. 6962 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6963 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6964 MaxBECount = EL0.MaxNotTaken; 6965 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6966 BECount = EL0.ExactNotTaken; 6967 } 6968 6969 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6970 // to be more aggressive when computing BECount than when computing 6971 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6972 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6973 // to not. 6974 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6975 !isa<SCEVCouldNotCompute>(BECount)) 6976 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6977 6978 return ExitLimit(BECount, MaxBECount, false, 6979 {&EL0.Predicates, &EL1.Predicates}); 6980 } 6981 if (BO->getOpcode() == Instruction::Or) { 6982 // Recurse on the operands of the or. 6983 bool EitherMayExit = L->contains(FBB); 6984 ExitLimit EL0 = computeExitLimitFromCondCached( 6985 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6986 AllowPredicates); 6987 ExitLimit EL1 = computeExitLimitFromCondCached( 6988 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6989 AllowPredicates); 6990 const SCEV *BECount = getCouldNotCompute(); 6991 const SCEV *MaxBECount = getCouldNotCompute(); 6992 if (EitherMayExit) { 6993 // Both conditions must be false for the loop to continue executing. 6994 // Choose the less conservative count. 6995 if (EL0.ExactNotTaken == getCouldNotCompute() || 6996 EL1.ExactNotTaken == getCouldNotCompute()) 6997 BECount = getCouldNotCompute(); 6998 else 6999 BECount = 7000 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7001 if (EL0.MaxNotTaken == getCouldNotCompute()) 7002 MaxBECount = EL1.MaxNotTaken; 7003 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7004 MaxBECount = EL0.MaxNotTaken; 7005 else 7006 MaxBECount = 7007 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7008 } else { 7009 // Both conditions must be false at the same time for the loop to exit. 7010 // For now, be conservative. 
7011 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 7012 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7013 MaxBECount = EL0.MaxNotTaken; 7014 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7015 BECount = EL0.ExactNotTaken; 7016 } 7017 7018 return ExitLimit(BECount, MaxBECount, false, 7019 {&EL0.Predicates, &EL1.Predicates}); 7020 } 7021 } 7022 7023 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7024 // Proceed to the next level to examine the icmp. 7025 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7026 ExitLimit EL = 7027 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 7028 if (EL.hasFullInfo() || !AllowPredicates) 7029 return EL; 7030 7031 // Try again, but use SCEV predicates this time. 7032 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 7033 /*AllowPredicates=*/true); 7034 } 7035 7036 // Check for a constant condition. These are normally stripped out by 7037 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7038 // preserve the CFG and is temporarily leaving constant conditions 7039 // in place. 7040 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7041 if (L->contains(FBB) == !CI->getZExtValue()) 7042 // The backedge is always taken. 7043 return getCouldNotCompute(); 7044 else 7045 // The backedge is never taken. 7046 return getZero(CI->getType()); 7047 } 7048 7049 // If it's not an integer or pointer comparison then compute it the hard way. 7050 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 7051 } 7052 7053 ScalarEvolution::ExitLimit 7054 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7055 ICmpInst *ExitCond, 7056 BasicBlock *TBB, 7057 BasicBlock *FBB, 7058 bool ControlsExit, 7059 bool AllowPredicates) { 7060 // If the condition was exit on true, convert the condition to exit on false 7061 ICmpInst::Predicate Cond; 7062 if (!L->contains(FBB)) 7063 Cond = ExitCond->getPredicate(); 7064 else 7065 Cond = ExitCond->getInversePredicate(); 7066 7067 // Handle common loops like: for (X = "string"; *X; ++X) 7068 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7069 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7070 ExitLimit ItCnt = 7071 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 7072 if (ItCnt.hasAnyInfo()) 7073 return ItCnt; 7074 } 7075 7076 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7077 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7078 7079 // Try to evaluate any dependencies out of the loop. 7080 LHS = getSCEVAtScope(LHS, L); 7081 RHS = getSCEVAtScope(RHS, L); 7082 7083 // At this point, we would like to compute how many iterations of the 7084 // loop the predicate will return true for these inputs. 7085 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7086 // If there is a loop-invariant, force it into the RHS. 7087 std::swap(LHS, RHS); 7088 Cond = ICmpInst::getSwappedPredicate(Cond); 7089 } 7090 7091 // Simplify the operands before analyzing them. 7092 (void)SimplifyICmpOperands(Cond, LHS, RHS); 7093 7094 // If we have a comparison of a chrec against a constant, try to use value 7095 // ranges to answer this query. 7096 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7097 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7098 if (AddRec->getLoop() == L) { 7099 // Form the constant range. 
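        // For example (illustrative only): comparing the affine addrec
        // {0,+,1}<L> slt 10 gives the region [INT_MIN, 10); the addrec's
        // values 0 .. 9 lie inside it, so getNumIterationsInRange below can
        // fold the limit to the constant 10.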
7100 ConstantRange CompRange = 7101 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 7102 7103 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7104 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7105 } 7106 7107 switch (Cond) { 7108 case ICmpInst::ICMP_NE: { // while (X != Y) 7109 // Convert to: while (X-Y != 0) 7110 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7111 AllowPredicates); 7112 if (EL.hasAnyInfo()) return EL; 7113 break; 7114 } 7115 case ICmpInst::ICMP_EQ: { // while (X == Y) 7116 // Convert to: while (X-Y == 0) 7117 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7118 if (EL.hasAnyInfo()) return EL; 7119 break; 7120 } 7121 case ICmpInst::ICMP_SLT: 7122 case ICmpInst::ICMP_ULT: { // while (X < Y) 7123 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 7124 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7125 AllowPredicates); 7126 if (EL.hasAnyInfo()) return EL; 7127 break; 7128 } 7129 case ICmpInst::ICMP_SGT: 7130 case ICmpInst::ICMP_UGT: { // while (X > Y) 7131 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 7132 ExitLimit EL = 7133 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7134 AllowPredicates); 7135 if (EL.hasAnyInfo()) return EL; 7136 break; 7137 } 7138 default: 7139 break; 7140 } 7141 7142 auto *ExhaustiveCount = 7143 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 7144 7145 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7146 return ExhaustiveCount; 7147 7148 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7149 ExitCond->getOperand(1), L, Cond); 7150 } 7151 7152 ScalarEvolution::ExitLimit 7153 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7154 SwitchInst *Switch, 7155 BasicBlock *ExitingBlock, 7156 bool ControlsExit) { 7157 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7158 7159 // Give up if the exit is the default dest of a switch. 7160 if (Switch->getDefaultDest() == ExitingBlock) 7161 return getCouldNotCompute(); 7162 7163 assert(L->contains(Switch->getDefaultDest()) && 7164 "Default case must not exit the loop!"); 7165 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7166 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7167 7168 // while (X != Y) --> while (X-Y != 0) 7169 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7170 if (EL.hasAnyInfo()) 7171 return EL; 7172 7173 return getCouldNotCompute(); 7174 } 7175 7176 static ConstantInt * 7177 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7178 ScalarEvolution &SE) { 7179 const SCEV *InVal = SE.getConstant(C); 7180 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7181 assert(isa<SCEVConstant>(Val) && 7182 "Evaluation of SCEV at constant didn't fold correctly?"); 7183 return cast<SCEVConstant>(Val)->getValue(); 7184 } 7185 7186 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7187 /// compute the backedge execution count. 7188 ScalarEvolution::ExitLimit 7189 ScalarEvolution::computeLoadConstantCompareExitLimit( 7190 LoadInst *LI, 7191 Constant *RHS, 7192 const Loop *L, 7193 ICmpInst::Predicate predicate) { 7194 if (LI->isVolatile()) return getCouldNotCompute(); 7195 7196 // Check to see if the loaded pointer is a getelementptr of a global. 7197 // TODO: Use SCEV instead of manually grubbing with GEPs. 
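  // This handles loops such as (illustrative):
  //   for (i = 0; table[i] != 0; ++i)
  // where "table" is a constant global array: each candidate iteration's load
  // is folded through the GEP until the comparison is decided.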
7198   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7199   if (!GEP) return getCouldNotCompute();
7200
7201   // Make sure that it is really a constant global we are gepping, with an
7202   // initializer, and make sure the first IDX is really 0.
7203   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7204   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7205       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7206       !cast<Constant>(GEP->getOperand(1))->isNullValue())
7207     return getCouldNotCompute();
7208
7209   // Okay, we allow one non-constant index into the GEP instruction.
7210   Value *VarIdx = nullptr;
7211   std::vector<Constant*> Indexes;
7212   unsigned VarIdxNum = 0;
7213   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7214     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7215       Indexes.push_back(CI);
7216     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7217       if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
7218       VarIdx = GEP->getOperand(i);
7219       VarIdxNum = i-2;
7220       Indexes.push_back(nullptr);
7221     }
7222
7223   // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7224   if (!VarIdx)
7225     return getCouldNotCompute();
7226
7227   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7228   // Check to see if X is a loop variant variable value now.
7229   const SCEV *Idx = getSCEV(VarIdx);
7230   Idx = getSCEVAtScope(Idx, L);
7231
7232   // We can only recognize very limited forms of loop index expressions, in
7233   // particular, only affine AddRec's like {C1,+,C2}.
7234   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7235   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7236       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7237       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7238     return getCouldNotCompute();
7239
7240   unsigned MaxSteps = MaxBruteForceIterations;
7241   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7242     ConstantInt *ItCst = ConstantInt::get(
7243         cast<IntegerType>(IdxExpr->getType()), IterationNum);
7244     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7245
7246     // Form the GEP offset.
7247     Indexes[VarIdxNum] = Val;
7248
7249     Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7250                                                          Indexes);
7251     if (!Result) break;  // Cannot compute!
7252
7253     // Evaluate the condition for this iteration.
7254     Result = ConstantExpr::getICmp(predicate, Result, RHS);
7255     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
7256     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7257       ++NumArrayLenItCounts;
7258       return getConstant(ItCst);   // Found terminating iteration!
7259     }
7260   }
7261   return getCouldNotCompute();
7262 }
7263
7264 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7265     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7266   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7267   if (!RHS)
7268     return getCouldNotCompute();
7269
7270   const BasicBlock *Latch = L->getLoopLatch();
7271   if (!Latch)
7272     return getCouldNotCompute();
7273
7274   const BasicBlock *Predecessor = L->getLoopPredecessor();
7275   if (!Predecessor)
7276     return getCouldNotCompute();
7277
7278   // Return true if V is of the form "LHS `shift_op` <positive constant>".
7279   // Return LHS in OutLHS and shift_op in OutOpCode.
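  // For example (illustrative): "lshr i32 %x, 3" matches with OutLHS = %x and
  // OutOpCode = LShr, while "lshr i32 %x, 0" does not, since the shift amount
  // must be strictly positive.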
7280   auto MatchPositiveShift =
7281       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7282
7283     using namespace PatternMatch;
7284
7285     ConstantInt *ShiftAmt;
7286     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7287       OutOpCode = Instruction::LShr;
7288     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7289       OutOpCode = Instruction::AShr;
7290     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7291       OutOpCode = Instruction::Shl;
7292     else
7293       return false;
7294
7295     return ShiftAmt->getValue().isStrictlyPositive();
7296   };
7297
7298   // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
7299   //
7300   // loop:
7301   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7302   //   %iv.shifted = lshr i32 %iv, <positive constant>
7303   //
7304   // Return true on a successful match.  Return the corresponding PHI node (%iv
7305   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7306   auto MatchShiftRecurrence =
7307       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7308     Optional<Instruction::BinaryOps> PostShiftOpCode;
7309
7310     {
7311       Instruction::BinaryOps OpC;
7312       Value *V;
7313
7314       // If we encounter a shift instruction, "peel off" the shift operation,
7315       // and remember that we did so.  Later when we inspect %iv's backedge
7316       // value, we will make sure that the backedge value uses the same
7317       // operation.
7318       //
7319       // Note: the peeled shift operation does not have to be the same
7320       // instruction as the one feeding into the PHI's backedge value.  We only
7321       // really care about it being the same *kind* of shift instruction --
7322       // that's all that is required for our later inferences to hold.
7323       if (MatchPositiveShift(LHS, V, OpC)) {
7324         PostShiftOpCode = OpC;
7325         LHS = V;
7326       }
7327     }
7328
7329     PNOut = dyn_cast<PHINode>(LHS);
7330     if (!PNOut || PNOut->getParent() != L->getHeader())
7331       return false;
7332
7333     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7334     Value *OpLHS;
7335
7336     return
7337         // The backedge value for the PHI node must be a shift by a positive
7338         // amount
7339         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7340
7341         // of the PHI node itself
7342         OpLHS == PNOut &&
7343
7344         // and the kind of shift must match the kind of shift we peeled
7345         // off, if any.
7346         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7347   };
7348
7349   PHINode *PN;
7350   Instruction::BinaryOps OpCode;
7351   if (!MatchShiftRecurrence(LHS, PN, OpCode))
7352     return getCouldNotCompute();
7353
7354   const DataLayout &DL = getDataLayout();
7355
7356   // The key rationale for this optimization is that for some kinds of shift
7357   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7358   // within a finite number of iterations.  If the condition guarding the
7359   // backedge (in the sense that the backedge is taken if the condition is true)
7360   // is false for the value the shift recurrence stabilizes to, then we know
7361   // that the backedge is taken only a finite number of times.
7362
7363   ConstantInt *StableValue = nullptr;
7364   switch (OpCode) {
7365   default:
7366     llvm_unreachable("Impossible case!");
7367
7368   case Instruction::AShr: {
7369     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7370     // bitwidth(K) iterations.
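    // For example (illustrative): {-20,ashr,2} evolves as
    // -20, -5, -2, -1, -1, ... and stabilizes to -1, while {20,ashr,2}
    // evolves as 20, 5, 1, 0, 0, ... and stabilizes to 0.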
7371 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7372 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7373 Predecessor->getTerminator(), &DT); 7374 auto *Ty = cast<IntegerType>(RHS->getType()); 7375 if (Known.isNonNegative()) 7376 StableValue = ConstantInt::get(Ty, 0); 7377 else if (Known.isNegative()) 7378 StableValue = ConstantInt::get(Ty, -1, true); 7379 else 7380 return getCouldNotCompute(); 7381 7382 break; 7383 } 7384 case Instruction::LShr: 7385 case Instruction::Shl: 7386 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7387 // stabilize to 0 in at most bitwidth(K) iterations. 7388 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7389 break; 7390 } 7391 7392 auto *Result = 7393 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7394 assert(Result->getType()->isIntegerTy(1) && 7395 "Otherwise cannot be an operand to a branch instruction"); 7396 7397 if (Result->isZeroValue()) { 7398 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7399 const SCEV *UpperBound = 7400 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7401 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7402 } 7403 7404 return getCouldNotCompute(); 7405 } 7406 7407 /// Return true if we can constant fold an instruction of the specified type, 7408 /// assuming that all operands were constants. 7409 static bool CanConstantFold(const Instruction *I) { 7410 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7411 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7412 isa<LoadInst>(I)) 7413 return true; 7414 7415 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7416 if (const Function *F = CI->getCalledFunction()) 7417 return canConstantFoldCallTo(CI, F); 7418 return false; 7419 } 7420 7421 /// Determine whether this instruction can constant evolve within this loop 7422 /// assuming its operands can all constant evolve. 7423 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7424 // An instruction outside of the loop can't be derived from a loop PHI. 7425 if (!L->contains(I)) return false; 7426 7427 if (isa<PHINode>(I)) { 7428 // We don't currently keep track of the control flow needed to evaluate 7429 // PHIs, so we cannot handle PHIs inside of loops. 7430 return L->getHeader() == I->getParent(); 7431 } 7432 7433 // If we won't be able to constant fold this expression even if the operands 7434 // are constants, bail early. 7435 return CanConstantFold(I); 7436 } 7437 7438 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7439 /// recursing through each instruction operand until reaching a loop header phi. 7440 static PHINode * 7441 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7442 DenseMap<Instruction *, PHINode *> &PHIMap, 7443 unsigned Depth) { 7444 if (Depth > MaxConstantEvolvingDepth) 7445 return nullptr; 7446 7447 // Otherwise, we can evaluate this instruction if all of its operands are 7448 // constant or derived from a PHI node themselves. 7449 PHINode *PHI = nullptr; 7450 for (Value *Op : UseInst->operands()) { 7451 if (isa<Constant>(Op)) continue; 7452 7453 Instruction *OpInst = dyn_cast<Instruction>(Op); 7454 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7455 7456 PHINode *P = dyn_cast<PHINode>(OpInst); 7457 if (!P) 7458 // If this operand is already visited, reuse the prior result. 7459 // We may have P != PHI if this is the deepest point at which the 7460 // inconsistent paths meet. 
7461       P = PHIMap.lookup(OpInst);
7462     if (!P) {
7463       // Recurse and memoize the results, whether a phi is found or not.
7464       // This recursive call invalidates pointers into PHIMap.
7465       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7466       PHIMap[OpInst] = P;
7467     }
7468     if (!P)
7469       return nullptr;  // Not evolving from PHI
7470     if (PHI && PHI != P)
7471       return nullptr;  // Evolving from multiple different PHIs.
7472     PHI = P;
7473   }
7474   // This is an expression evolving from a constant PHI!
7475   return PHI;
7476 }
7477
7478 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7479 /// in the loop that V is derived from.  We allow arbitrary operations along the
7480 /// way, but the operands of an operation must either be constants or a value
7481 /// derived from a constant PHI.  If this expression does not fit with these
7482 /// constraints, return null.
7483 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7484   Instruction *I = dyn_cast<Instruction>(V);
7485   if (!I || !canConstantEvolve(I, L)) return nullptr;
7486
7487   if (PHINode *PN = dyn_cast<PHINode>(I))
7488     return PN;
7489
7490   // Record non-constant instructions contained by the loop.
7491   DenseMap<Instruction *, PHINode *> PHIMap;
7492   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7493 }
7494
7495 /// EvaluateExpression - Given an expression that passes the
7496 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
7497 /// in the loop has the value PHIVal.  If we can't fold this expression for some
7498 /// reason, return null.
7499 static Constant *EvaluateExpression(Value *V, const Loop *L,
7500                                     DenseMap<Instruction *, Constant *> &Vals,
7501                                     const DataLayout &DL,
7502                                     const TargetLibraryInfo *TLI) {
7503   // Convenient constant check, but redundant for recursive calls.
7504   if (Constant *C = dyn_cast<Constant>(V)) return C;
7505   Instruction *I = dyn_cast<Instruction>(V);
7506   if (!I) return nullptr;
7507
7508   if (Constant *C = Vals.lookup(I)) return C;
7509
7510   // An instruction inside the loop depends on a value outside the loop that we
7511   // weren't given a mapping for, or a value such as a call inside the loop.
7512   if (!canConstantEvolve(I, L)) return nullptr;
7513
7514   // An unmapped PHI can be due to a branch or another loop inside this loop,
7515   // or due to this not being the initial iteration through a loop where we
7516   // couldn't compute the evolution of this particular PHI last time.
7517   if (isa<PHINode>(I)) return nullptr;
7518
7519   std::vector<Constant*> Operands(I->getNumOperands());
7520
7521   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
7522     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
7523     if (!Operand) {
7524       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
7525       if (!Operands[i]) return nullptr;
7526       continue;
7527     }
7528     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
7529     Vals[Operand] = C;
7530     if (!C) return nullptr;
7531     Operands[i] = C;
7532   }
7533
7534   if (CmpInst *CI = dyn_cast<CmpInst>(I))
7535     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
7536                                            Operands[1], DL, TLI);
7537   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7538     if (!LI->isVolatile())
7539       return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
7540   }
7541   return ConstantFoldInstOperands(I, Operands, DL, TLI);
7542 }
7543
7544
7545 // If every incoming value to PN except the one for BB is a specific Constant,
7546 // return that, else return nullptr.
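// For example (illustrative):
//   %p = phi i32 [ 7, %preheader ], [ %p.next, %latch ]
// getOtherIncomingValue(%p, %latch) returns i32 7; if the preheader instead
// passed in a non-constant value, the result would be nullptr.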
7547 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7548 Constant *IncomingVal = nullptr; 7549 7550 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7551 if (PN->getIncomingBlock(i) == BB) 7552 continue; 7553 7554 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7555 if (!CurrentVal) 7556 return nullptr; 7557 7558 if (IncomingVal != CurrentVal) { 7559 if (IncomingVal) 7560 return nullptr; 7561 IncomingVal = CurrentVal; 7562 } 7563 } 7564 7565 return IncomingVal; 7566 } 7567 7568 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7569 /// in the header of its containing loop, we know the loop executes a 7570 /// constant number of times, and the PHI node is just a recurrence 7571 /// involving constants, fold it. 7572 Constant * 7573 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7574 const APInt &BEs, 7575 const Loop *L) { 7576 auto I = ConstantEvolutionLoopExitValue.find(PN); 7577 if (I != ConstantEvolutionLoopExitValue.end()) 7578 return I->second; 7579 7580 if (BEs.ugt(MaxBruteForceIterations)) 7581 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7582 7583 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7584 7585 DenseMap<Instruction *, Constant *> CurrentIterVals; 7586 BasicBlock *Header = L->getHeader(); 7587 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7588 7589 BasicBlock *Latch = L->getLoopLatch(); 7590 if (!Latch) 7591 return nullptr; 7592 7593 for (auto &I : *Header) { 7594 PHINode *PHI = dyn_cast<PHINode>(&I); 7595 if (!PHI) break; 7596 auto *StartCST = getOtherIncomingValue(PHI, Latch); 7597 if (!StartCST) continue; 7598 CurrentIterVals[PHI] = StartCST; 7599 } 7600 if (!CurrentIterVals.count(PN)) 7601 return RetVal = nullptr; 7602 7603 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7604 7605 // Execute the loop symbolically to determine the exit value. 7606 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7607 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7608 7609 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7610 unsigned IterationNum = 0; 7611 const DataLayout &DL = getDataLayout(); 7612 for (; ; ++IterationNum) { 7613 if (IterationNum == NumIterations) 7614 return RetVal = CurrentIterVals[PN]; // Got exit value! 7615 7616 // Compute the value of the PHIs for the next iteration. 7617 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7618 DenseMap<Instruction *, Constant *> NextIterVals; 7619 Constant *NextPHI = 7620 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7621 if (!NextPHI) 7622 return nullptr; // Couldn't evaluate! 7623 NextIterVals[PN] = NextPHI; 7624 7625 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7626 7627 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7628 // cease to be able to evaluate one of them or if they stop evolving, 7629 // because that doesn't necessarily prevent us from computing PN. 7630 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7631 for (const auto &I : CurrentIterVals) { 7632 PHINode *PHI = dyn_cast<PHINode>(I.first); 7633 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7634 PHIsToCompute.emplace_back(PHI, I.second); 7635 } 7636 // We use two distinct loops because EvaluateExpression may invalidate any 7637 // iterators into CurrentIterVals. 
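    // (Editor's note, illustrative only: DenseMap may grow and rehash on
    // insert, so a single pass of the form
    //
    //   for (const auto &Entry : CurrentIterVals)         // iterate...
    //     EvaluateExpression(..., CurrentIterVals, ...);  // ...while inserting
    //
    // could leave the loop's iterator dangling; snapshotting the PHIs first,
    // as done here, avoids that.)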
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a value
  // of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
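  // (Editor's note: by contrast, a hypothetical source loop such as
  // "i = 0; while (i != 12) i += 3;" is resolved by the loop above at
  // IterationNum == 4, after symbolically evaluating i = 0, 3, 6, 9, 12.)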
7726 return getCouldNotCompute(); 7727 } 7728 7729 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7730 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7731 ValuesAtScopes[V]; 7732 // Check to see if we've folded this expression at this loop before. 7733 for (auto &LS : Values) 7734 if (LS.first == L) 7735 return LS.second ? LS.second : V; 7736 7737 Values.emplace_back(L, nullptr); 7738 7739 // Otherwise compute it. 7740 const SCEV *C = computeSCEVAtScope(V, L); 7741 for (auto &LS : reverse(ValuesAtScopes[V])) 7742 if (LS.first == L) { 7743 LS.second = C; 7744 break; 7745 } 7746 return C; 7747 } 7748 7749 /// This builds up a Constant using the ConstantExpr interface. That way, we 7750 /// will return Constants for objects which aren't represented by a 7751 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7752 /// Returns NULL if the SCEV isn't representable as a Constant. 7753 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7754 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7755 case scCouldNotCompute: 7756 case scAddRecExpr: 7757 break; 7758 case scConstant: 7759 return cast<SCEVConstant>(V)->getValue(); 7760 case scUnknown: 7761 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7762 case scSignExtend: { 7763 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7764 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7765 return ConstantExpr::getSExt(CastOp, SS->getType()); 7766 break; 7767 } 7768 case scZeroExtend: { 7769 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7770 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7771 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7772 break; 7773 } 7774 case scTruncate: { 7775 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7776 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7777 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7778 break; 7779 } 7780 case scAddExpr: { 7781 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7782 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7783 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7784 unsigned AS = PTy->getAddressSpace(); 7785 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7786 C = ConstantExpr::getBitCast(C, DestPtrTy); 7787 } 7788 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7789 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7790 if (!C2) return nullptr; 7791 7792 // First pointer! 7793 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7794 unsigned AS = C2->getType()->getPointerAddressSpace(); 7795 std::swap(C, C2); 7796 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7797 // The offsets have been converted to bytes. We can add bytes to an 7798 // i8* by GEP with the byte count in the first index. 7799 C = ConstantExpr::getBitCast(C, DestPtrTy); 7800 } 7801 7802 // Don't bother trying to sum two pointers. We probably can't 7803 // statically compute a load that results from it anyway. 
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    break;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy()) return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy()) return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    break;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    break;
  }
  case scSMaxExpr:
  case scUMaxExpr:
    break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = this->LI[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                    dyn_cast<SCEVConstant>(BackedgeTakenCount)) {

              // This trivial case can show up in some degenerate cases where
              // the incoming IR has not yet been fully simplified.
              if (BTCC->getValue()->isZero()) {
                Value *InitValue = nullptr;
                bool MultipleInitValues = false;
                for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                  if (!LI->contains(PN->getIncomingBlock(i))) {
                    if (!InitValue)
                      InitValue = PN->getIncomingValue(i);
                    else if (InitValue != PN->getIncomingValue(i)) {
                      MultipleInitValues = true;
                      break;
                    }
                  }
                }
                // Only return the init value once *all* out-of-loop incoming
                // values have been checked for consistency; returning from
                // inside the loop above could miss a conflicting value.
                if (!MultipleInitValues && InitValue)
                  return getSCEV(InitValue);
              }
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.
This is particularly useful for computing loop exit values. 7898 if (CanConstantFold(I)) { 7899 SmallVector<Constant *, 4> Operands; 7900 bool MadeImprovement = false; 7901 for (Value *Op : I->operands()) { 7902 if (Constant *C = dyn_cast<Constant>(Op)) { 7903 Operands.push_back(C); 7904 continue; 7905 } 7906 7907 // If any of the operands is non-constant and if they are 7908 // non-integer and non-pointer, don't even try to analyze them 7909 // with scev techniques. 7910 if (!isSCEVable(Op->getType())) 7911 return V; 7912 7913 const SCEV *OrigV = getSCEV(Op); 7914 const SCEV *OpV = getSCEVAtScope(OrigV, L); 7915 MadeImprovement |= OrigV != OpV; 7916 7917 Constant *C = BuildConstantFromSCEV(OpV); 7918 if (!C) return V; 7919 if (C->getType() != Op->getType()) 7920 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 7921 Op->getType(), 7922 false), 7923 C, Op->getType()); 7924 Operands.push_back(C); 7925 } 7926 7927 // Check to see if getSCEVAtScope actually made an improvement. 7928 if (MadeImprovement) { 7929 Constant *C = nullptr; 7930 const DataLayout &DL = getDataLayout(); 7931 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 7932 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7933 Operands[1], DL, &TLI); 7934 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 7935 if (!LI->isVolatile()) 7936 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7937 } else 7938 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 7939 if (!C) return V; 7940 return getSCEV(C); 7941 } 7942 } 7943 } 7944 7945 // This is some other type of SCEVUnknown, just return it. 7946 return V; 7947 } 7948 7949 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 7950 // Avoid performing the look-up in the common case where the specified 7951 // expression has no loop-variant portions. 7952 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 7953 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7954 if (OpAtScope != Comm->getOperand(i)) { 7955 // Okay, at least one of these operands is loop variant but might be 7956 // foldable. Build a new instance of the folded commutative expression. 7957 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 7958 Comm->op_begin()+i); 7959 NewOps.push_back(OpAtScope); 7960 7961 for (++i; i != e; ++i) { 7962 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7963 NewOps.push_back(OpAtScope); 7964 } 7965 if (isa<SCEVAddExpr>(Comm)) 7966 return getAddExpr(NewOps); 7967 if (isa<SCEVMulExpr>(Comm)) 7968 return getMulExpr(NewOps); 7969 if (isa<SCEVSMaxExpr>(Comm)) 7970 return getSMaxExpr(NewOps); 7971 if (isa<SCEVUMaxExpr>(Comm)) 7972 return getUMaxExpr(NewOps); 7973 llvm_unreachable("Unknown commutative SCEV type!"); 7974 } 7975 } 7976 // If we got here, all operands are loop invariant. 7977 return Comm; 7978 } 7979 7980 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 7981 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 7982 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 7983 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 7984 return Div; // must be loop invariant 7985 return getUDivExpr(LHS, RHS); 7986 } 7987 7988 // If this is a loop recurrence for a loop that does not contain L, then we 7989 // are dealing with the final value computed by the loop. 7990 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 7991 // First, attempt to evaluate each operand. 
7992 // Avoid performing the look-up in the common case where the specified 7993 // expression has no loop-variant portions. 7994 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 7995 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 7996 if (OpAtScope == AddRec->getOperand(i)) 7997 continue; 7998 7999 // Okay, at least one of these operands is loop variant but might be 8000 // foldable. Build a new instance of the folded commutative expression. 8001 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8002 AddRec->op_begin()+i); 8003 NewOps.push_back(OpAtScope); 8004 for (++i; i != e; ++i) 8005 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8006 8007 const SCEV *FoldedRec = 8008 getAddRecExpr(NewOps, AddRec->getLoop(), 8009 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8010 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8011 // The addrec may be folded to a nonrecurrence, for example, if the 8012 // induction variable is multiplied by zero after constant folding. Go 8013 // ahead and return the folded value. 8014 if (!AddRec) 8015 return FoldedRec; 8016 break; 8017 } 8018 8019 // If the scope is outside the addrec's loop, evaluate it by using the 8020 // loop exit value of the addrec. 8021 if (!AddRec->getLoop()->contains(L)) { 8022 // To evaluate this recurrence, we need to know how many times the AddRec 8023 // loop iterates. Compute this now. 8024 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8025 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8026 8027 // Then, evaluate the AddRec. 8028 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8029 } 8030 8031 return AddRec; 8032 } 8033 8034 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8035 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8036 if (Op == Cast->getOperand()) 8037 return Cast; // must be loop invariant 8038 return getZeroExtendExpr(Op, Cast->getType()); 8039 } 8040 8041 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8042 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8043 if (Op == Cast->getOperand()) 8044 return Cast; // must be loop invariant 8045 return getSignExtendExpr(Op, Cast->getType()); 8046 } 8047 8048 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8049 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8050 if (Op == Cast->getOperand()) 8051 return Cast; // must be loop invariant 8052 return getTruncateExpr(Op, Cast->getType()); 8053 } 8054 8055 llvm_unreachable("Unknown SCEV type!"); 8056 } 8057 8058 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8059 return getSCEVAtScope(getSCEV(V), L); 8060 } 8061 8062 /// Finds the minimum unsigned root of the following equation: 8063 /// 8064 /// A * X = B (mod N) 8065 /// 8066 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8067 /// A and B isn't important. 8068 /// 8069 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8070 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8071 ScalarEvolution &SE) { 8072 uint32_t BW = A.getBitWidth(); 8073 assert(BW == SE.getTypeSizeInBits(B->getType())); 8074 assert(A != 0 && "A must be non-zero."); 8075 8076 // 1. D = gcd(A, N) 8077 // 8078 // The gcd of A and N may have only one prime factor: 2. The number of 8079 // trailing zeros in A is its multiplicity 8080 uint32_t Mult2 = A.countTrailingZeros(); 8081 // D = 2^Mult2 8082 8083 // 2. 
Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //   I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //   (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns either the two roots (which might be the same) or
/// two SCEVCouldNotCompute objects.
static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.

  // The A coefficient is N/2.
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2.
  APInt B = M;
  B -= A;  // A is the same as N/2.

  // The C coefficient is L.
  const APInt &C = L;

  // Compute the B^2-4AC term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4AC). This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
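  //
  // Worked instance (editor's sketch): for the chrec {-8,+,3,+,2} the
  // polynomial is X^2 + 2X - 8 (A = N/2 = 1, B = M - A = 2, C = L = -8), so
  // SqrtTerm = B^2 - 4AC = 36, SqrtVal = 6, and the two solutions computed
  // below come out as (-2 + 6)/2 = 2 and (-2 - 6)/2 = -4.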
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know and take advantage of the fact that this
  // expression is only ever used in a comparison-against-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          // We found a quadratic root!
          return ExitLimit(R1, R1, false, Predicates);
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
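  //
  // (Editor's worked example: for an i8 IV {6,+,2} the equation is
  // 2*N = -6 = 250 (mod 256); the SolveLinEquationWithOverflow call below
  // yields N = (1 * 250) / 2 = 125, and indeed 6 + 2*125 == 256 == 0
  // (mod 2^8).)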
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.
If so, the backedge will execute zero times. 8320 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8321 if (!C->getValue()->isZero()) 8322 return getZero(C->getType()); 8323 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8324 } 8325 8326 // We could implement others, but I really doubt anyone writes loops like 8327 // this, and if they did, they would already be constant folded. 8328 return getCouldNotCompute(); 8329 } 8330 8331 std::pair<BasicBlock *, BasicBlock *> 8332 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8333 // If the block has a unique predecessor, then there is no path from the 8334 // predecessor to the block that does not go through the direct edge 8335 // from the predecessor to the block. 8336 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8337 return {Pred, BB}; 8338 8339 // A loop's header is defined to be a block that dominates the loop. 8340 // If the header has a unique predecessor outside the loop, it must be 8341 // a block that has exactly one successor that can reach the loop. 8342 if (Loop *L = LI.getLoopFor(BB)) 8343 return {L->getLoopPredecessor(), L->getHeader()}; 8344 8345 return {nullptr, nullptr}; 8346 } 8347 8348 /// SCEV structural equivalence is usually sufficient for testing whether two 8349 /// expressions are equal, however for the purposes of looking for a condition 8350 /// guarding a loop, it can be useful to be a little more general, since a 8351 /// front-end may have replicated the controlling expression. 8352 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8353 // Quick check to see if they are the same SCEV. 8354 if (A == B) return true; 8355 8356 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8357 // Not all instructions that are "identical" compute the same value. For 8358 // instance, two distinct alloca instructions allocating the same type are 8359 // identical and do not read memory; but compute distinct values. 8360 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8361 }; 8362 8363 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8364 // two different instructions with the same value. Check for this case. 8365 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8366 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8367 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8368 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8369 if (ComputesEqualValues(AI, BI)) 8370 return true; 8371 8372 // Otherwise assume they may have a different value. 8373 return false; 8374 } 8375 8376 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8377 const SCEV *&LHS, const SCEV *&RHS, 8378 unsigned Depth) { 8379 bool Changed = false; 8380 8381 // If we hit the max recursion limit bail out. 8382 if (Depth >= 3) 8383 return false; 8384 8385 // Canonicalize a constant to the right side. 8386 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8387 // Check for both operands constant. 8388 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8389 if (ConstantExpr::getICmp(Pred, 8390 LHSC->getValue(), 8391 RHSC->getValue())->isNullValue()) 8392 goto trivially_false; 8393 else 8394 goto trivially_true; 8395 } 8396 // Otherwise swap the operands to put the constant on the right. 
8397 std::swap(LHS, RHS); 8398 Pred = ICmpInst::getSwappedPredicate(Pred); 8399 Changed = true; 8400 } 8401 8402 // If we're comparing an addrec with a value which is loop-invariant in the 8403 // addrec's loop, put the addrec on the left. Also make a dominance check, 8404 // as both operands could be addrecs loop-invariant in each other's loop. 8405 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8406 const Loop *L = AR->getLoop(); 8407 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8408 std::swap(LHS, RHS); 8409 Pred = ICmpInst::getSwappedPredicate(Pred); 8410 Changed = true; 8411 } 8412 } 8413 8414 // If there's a constant operand, canonicalize comparisons with boundary 8415 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8416 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8417 const APInt &RA = RC->getAPInt(); 8418 8419 bool SimplifiedByConstantRange = false; 8420 8421 if (!ICmpInst::isEquality(Pred)) { 8422 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8423 if (ExactCR.isFullSet()) 8424 goto trivially_true; 8425 else if (ExactCR.isEmptySet()) 8426 goto trivially_false; 8427 8428 APInt NewRHS; 8429 CmpInst::Predicate NewPred; 8430 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8431 ICmpInst::isEquality(NewPred)) { 8432 // We were able to convert an inequality to an equality. 8433 Pred = NewPred; 8434 RHS = getConstant(NewRHS); 8435 Changed = SimplifiedByConstantRange = true; 8436 } 8437 } 8438 8439 if (!SimplifiedByConstantRange) { 8440 switch (Pred) { 8441 default: 8442 break; 8443 case ICmpInst::ICMP_EQ: 8444 case ICmpInst::ICMP_NE: 8445 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8446 if (!RA) 8447 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8448 if (const SCEVMulExpr *ME = 8449 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8450 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8451 ME->getOperand(0)->isAllOnesValue()) { 8452 RHS = AE->getOperand(1); 8453 LHS = ME->getOperand(1); 8454 Changed = true; 8455 } 8456 break; 8457 8458 8459 // The "Should have been caught earlier!" messages refer to the fact 8460 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8461 // should have fired on the corresponding cases, and canonicalized the 8462 // check to trivially_true or trivially_false. 8463 8464 case ICmpInst::ICMP_UGE: 8465 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8466 Pred = ICmpInst::ICMP_UGT; 8467 RHS = getConstant(RA - 1); 8468 Changed = true; 8469 break; 8470 case ICmpInst::ICMP_ULE: 8471 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8472 Pred = ICmpInst::ICMP_ULT; 8473 RHS = getConstant(RA + 1); 8474 Changed = true; 8475 break; 8476 case ICmpInst::ICMP_SGE: 8477 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8478 Pred = ICmpInst::ICMP_SGT; 8479 RHS = getConstant(RA - 1); 8480 Changed = true; 8481 break; 8482 case ICmpInst::ICMP_SLE: 8483 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8484 Pred = ICmpInst::ICMP_SLT; 8485 RHS = getConstant(RA + 1); 8486 Changed = true; 8487 break; 8488 } 8489 } 8490 } 8491 8492 // Check for obvious equality. 
8493 if (HasSameValue(LHS, RHS)) { 8494 if (ICmpInst::isTrueWhenEqual(Pred)) 8495 goto trivially_true; 8496 if (ICmpInst::isFalseWhenEqual(Pred)) 8497 goto trivially_false; 8498 } 8499 8500 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8501 // adding or subtracting 1 from one of the operands. 8502 switch (Pred) { 8503 case ICmpInst::ICMP_SLE: 8504 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8505 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8506 SCEV::FlagNSW); 8507 Pred = ICmpInst::ICMP_SLT; 8508 Changed = true; 8509 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8510 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8511 SCEV::FlagNSW); 8512 Pred = ICmpInst::ICMP_SLT; 8513 Changed = true; 8514 } 8515 break; 8516 case ICmpInst::ICMP_SGE: 8517 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8518 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8519 SCEV::FlagNSW); 8520 Pred = ICmpInst::ICMP_SGT; 8521 Changed = true; 8522 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8523 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8524 SCEV::FlagNSW); 8525 Pred = ICmpInst::ICMP_SGT; 8526 Changed = true; 8527 } 8528 break; 8529 case ICmpInst::ICMP_ULE: 8530 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8531 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8532 SCEV::FlagNUW); 8533 Pred = ICmpInst::ICMP_ULT; 8534 Changed = true; 8535 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8536 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8537 Pred = ICmpInst::ICMP_ULT; 8538 Changed = true; 8539 } 8540 break; 8541 case ICmpInst::ICMP_UGE: 8542 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8543 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8544 Pred = ICmpInst::ICMP_UGT; 8545 Changed = true; 8546 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8547 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8548 SCEV::FlagNUW); 8549 Pred = ICmpInst::ICMP_UGT; 8550 Changed = true; 8551 } 8552 break; 8553 default: 8554 break; 8555 } 8556 8557 // TODO: More simplifications are possible here. 8558 8559 // Recursively simplify until we either hit a recursion limit or nothing 8560 // changes. 8561 if (Changed) 8562 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8563 8564 return Changed; 8565 8566 trivially_true: 8567 // Return 0 == 0. 8568 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8569 Pred = ICmpInst::ICMP_EQ; 8570 return true; 8571 8572 trivially_false: 8573 // Return 0 != 0. 8574 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8575 Pred = ICmpInst::ICMP_NE; 8576 return true; 8577 } 8578 8579 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 8580 return getSignedRangeMax(S).isNegative(); 8581 } 8582 8583 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 8584 return getSignedRangeMin(S).isStrictlyPositive(); 8585 } 8586 8587 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 8588 return !getSignedRangeMin(S).isNegative(); 8589 } 8590 8591 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 8592 return !getSignedRangeMax(S).isStrictlyPositive(); 8593 } 8594 8595 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 8596 return isKnownNegative(S) || isKnownPositive(S); 8597 } 8598 8599 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 8600 const SCEV *LHS, const SCEV *RHS) { 8601 // Canonicalize the inputs first. 
8602 (void)SimplifyICmpOperands(Pred, LHS, RHS); 8603 8604 // If LHS or RHS is an addrec, check to see if the condition is true in 8605 // every iteration of the loop. 8606 // If LHS and RHS are both addrec, both conditions must be true in 8607 // every iteration of the loop. 8608 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 8609 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 8610 bool LeftGuarded = false; 8611 bool RightGuarded = false; 8612 if (LAR) { 8613 const Loop *L = LAR->getLoop(); 8614 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && 8615 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { 8616 if (!RAR) return true; 8617 LeftGuarded = true; 8618 } 8619 } 8620 if (RAR) { 8621 const Loop *L = RAR->getLoop(); 8622 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && 8623 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { 8624 if (!LAR) return true; 8625 RightGuarded = true; 8626 } 8627 } 8628 if (LeftGuarded && RightGuarded) 8629 return true; 8630 8631 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 8632 return true; 8633 8634 // Otherwise see what can be done with known constant ranges. 8635 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS); 8636 } 8637 8638 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 8639 ICmpInst::Predicate Pred, 8640 bool &Increasing) { 8641 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 8642 8643 #ifndef NDEBUG 8644 // Verify an invariant: inverting the predicate should turn a monotonically 8645 // increasing change to a monotonically decreasing one, and vice versa. 8646 bool IncreasingSwapped; 8647 bool ResultSwapped = isMonotonicPredicateImpl( 8648 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 8649 8650 assert(Result == ResultSwapped && "should be able to analyze both!"); 8651 if (ResultSwapped) 8652 assert(Increasing == !IncreasingSwapped && 8653 "monotonicity should flip as we flip the predicate"); 8654 #endif 8655 8656 return Result; 8657 } 8658 8659 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 8660 ICmpInst::Predicate Pred, 8661 bool &Increasing) { 8662 8663 // A zero step value for LHS means the induction variable is essentially a 8664 // loop invariant value. We don't really depend on the predicate actually 8665 // flipping from false to true (for increasing predicates, and the other way 8666 // around for decreasing predicates), all we care about is that *if* the 8667 // predicate changes then it only changes from false to true. 8668 // 8669 // A zero step value in itself is not very useful, but there may be places 8670 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 8671 // as general as possible. 
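  //
  // (Editor's illustration: for an <nuw> addrec {0,+,1} compared against a
  // loop-invariant N, "{0,+,1} u> N" can only switch from false to true as
  // iterations advance (monotonically increasing), while "{0,+,1} u< N" can
  // only switch from true to false (monotonically decreasing); the switch
  // below encodes exactly this case split for the unsigned and signed
  // predicates.)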
8672 8673 switch (Pred) { 8674 default: 8675 return false; // Conservative answer 8676 8677 case ICmpInst::ICMP_UGT: 8678 case ICmpInst::ICMP_UGE: 8679 case ICmpInst::ICMP_ULT: 8680 case ICmpInst::ICMP_ULE: 8681 if (!LHS->hasNoUnsignedWrap()) 8682 return false; 8683 8684 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8685 return true; 8686 8687 case ICmpInst::ICMP_SGT: 8688 case ICmpInst::ICMP_SGE: 8689 case ICmpInst::ICMP_SLT: 8690 case ICmpInst::ICMP_SLE: { 8691 if (!LHS->hasNoSignedWrap()) 8692 return false; 8693 8694 const SCEV *Step = LHS->getStepRecurrence(*this); 8695 8696 if (isKnownNonNegative(Step)) { 8697 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8698 return true; 8699 } 8700 8701 if (isKnownNonPositive(Step)) { 8702 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8703 return true; 8704 } 8705 8706 return false; 8707 } 8708 8709 } 8710 8711 llvm_unreachable("switch has default clause!"); 8712 } 8713 8714 bool ScalarEvolution::isLoopInvariantPredicate( 8715 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8716 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8717 const SCEV *&InvariantRHS) { 8718 8719 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 8720 if (!isLoopInvariant(RHS, L)) { 8721 if (!isLoopInvariant(LHS, L)) 8722 return false; 8723 8724 std::swap(LHS, RHS); 8725 Pred = ICmpInst::getSwappedPredicate(Pred); 8726 } 8727 8728 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8729 if (!ArLHS || ArLHS->getLoop() != L) 8730 return false; 8731 8732 bool Increasing; 8733 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8734 return false; 8735 8736 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8737 // true as the loop iterates, and the backedge is control dependent on 8738 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8739 // 8740 // * if the predicate was false in the first iteration then the predicate 8741 // is never evaluated again, since the loop exits without taking the 8742 // backedge. 8743 // * if the predicate was true in the first iteration then it will 8744 // continue to be true for all future iterations since it is 8745 // monotonically increasing. 8746 // 8747 // For both the above possibilities, we can replace the loop varying 8748 // predicate with its value on the first iteration of the loop (which is 8749 // loop invariant). 8750 // 8751 // A similar reasoning applies for a monotonically decreasing predicate, by 8752 // replacing true with false and false with true in the above two bullets. 8753 8754 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8755 8756 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8757 return false; 8758 8759 InvariantPred = Pred; 8760 InvariantLHS = ArLHS->getStart(); 8761 InvariantRHS = RHS; 8762 return true; 8763 } 8764 8765 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8766 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8767 if (HasSameValue(LHS, RHS)) 8768 return ICmpInst::isTrueWhenEqual(Pred); 8769 8770 // This code is split out from isKnownPredicate because it is called from 8771 // within isLoopEntryGuardedByCond. 
8772 8773 auto CheckRanges = 8774 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 8775 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 8776 .contains(RangeLHS); 8777 }; 8778 8779 // The check at the top of the function catches the case where the values are 8780 // known to be equal. 8781 if (Pred == CmpInst::ICMP_EQ) 8782 return false; 8783 8784 if (Pred == CmpInst::ICMP_NE) 8785 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 8786 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 8787 isKnownNonZero(getMinusSCEV(LHS, RHS)); 8788 8789 if (CmpInst::isSigned(Pred)) 8790 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 8791 8792 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 8793 } 8794 8795 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 8796 const SCEV *LHS, 8797 const SCEV *RHS) { 8798 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 8799 // Return Y via OutY. 8800 auto MatchBinaryAddToConst = 8801 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 8802 SCEV::NoWrapFlags ExpectedFlags) { 8803 const SCEV *NonConstOp, *ConstOp; 8804 SCEV::NoWrapFlags FlagsPresent; 8805 8806 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 8807 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 8808 return false; 8809 8810 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 8811 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 8812 }; 8813 8814 APInt C; 8815 8816 switch (Pred) { 8817 default: 8818 break; 8819 8820 case ICmpInst::ICMP_SGE: 8821 std::swap(LHS, RHS); 8822 LLVM_FALLTHROUGH; 8823 case ICmpInst::ICMP_SLE: 8824 // X s<= (X + C)<nsw> if C >= 0 8825 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 8826 return true; 8827 8828 // (X + C)<nsw> s<= X if C <= 0 8829 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 8830 !C.isStrictlyPositive()) 8831 return true; 8832 break; 8833 8834 case ICmpInst::ICMP_SGT: 8835 std::swap(LHS, RHS); 8836 LLVM_FALLTHROUGH; 8837 case ICmpInst::ICMP_SLT: 8838 // X s< (X + C)<nsw> if C > 0 8839 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 8840 C.isStrictlyPositive()) 8841 return true; 8842 8843 // (X + C)<nsw> s< X if C < 0 8844 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 8845 return true; 8846 break; 8847 } 8848 8849 return false; 8850 } 8851 8852 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 8853 const SCEV *LHS, 8854 const SCEV *RHS) { 8855 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 8856 return false; 8857 8858 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 8859 // the stack can result in exponential time complexity. 8860 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 8861 8862 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 8863 // 8864 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 8865 // isKnownPredicate. isKnownPredicate is more powerful, but also more 8866 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 8867 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 8868 // use isKnownPredicate later if needed. 
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to  "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree.  These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
8955 if (!DT.isReachableFromEntry(L->getHeader())) 8956 return false; 8957 8958 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 8959 return true; 8960 8961 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 8962 DTN != HeaderDTN; DTN = DTN->getIDom()) { 8963 assert(DTN && "should reach the loop header before reaching the root!"); 8964 8965 BasicBlock *BB = DTN->getBlock(); 8966 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 8967 return true; 8968 8969 BasicBlock *PBB = BB->getSinglePredecessor(); 8970 if (!PBB) 8971 continue; 8972 8973 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 8974 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 8975 continue; 8976 8977 Value *Condition = ContinuePredicate->getCondition(); 8978 8979 // If we have an edge `E` within the loop body that dominates the only 8980 // latch, the condition guarding `E` also guards the backedge. This 8981 // reasoning works only for loops with a single latch. 8982 8983 BasicBlockEdge DominatingEdge(PBB, BB); 8984 if (DominatingEdge.isSingleEdge()) { 8985 // We're constructively (and conservatively) enumerating edges within the 8986 // loop body that dominate the latch. The dominator tree better agree 8987 // with us on this: 8988 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 8989 8990 if (isImpliedCond(Pred, LHS, RHS, Condition, 8991 BB != ContinuePredicate->getSuccessor(0))) 8992 return true; 8993 } 8994 } 8995 8996 return false; 8997 } 8998 8999 bool 9000 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9001 ICmpInst::Predicate Pred, 9002 const SCEV *LHS, const SCEV *RHS) { 9003 // Interpret a null as meaning no loop, where there is obviously no guard 9004 // (interprocedural conditions notwithstanding). 9005 if (!L) return false; 9006 9007 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 9008 return true; 9009 9010 // Starting at the loop predecessor, climb up the predecessor chain, as long 9011 // as there are predecessors that can be found that have unique successors 9012 // leading to the original header. 9013 for (std::pair<BasicBlock *, BasicBlock *> 9014 Pair(L->getLoopPredecessor(), L->getHeader()); 9015 Pair.first; 9016 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 9017 9018 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 9019 return true; 9020 9021 BranchInst *LoopEntryPredicate = 9022 dyn_cast<BranchInst>(Pair.first->getTerminator()); 9023 if (!LoopEntryPredicate || 9024 LoopEntryPredicate->isUnconditional()) 9025 continue; 9026 9027 if (isImpliedCond(Pred, LHS, RHS, 9028 LoopEntryPredicate->getCondition(), 9029 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 9030 return true; 9031 } 9032 9033 // Check conditions due to any @llvm.assume intrinsics. 9034 for (auto &AssumeVH : AC.assumptions()) { 9035 if (!AssumeVH) 9036 continue; 9037 auto *CI = cast<CallInst>(AssumeVH); 9038 if (!DT.dominates(CI, L->getHeader())) 9039 continue; 9040 9041 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 9042 return true; 9043 } 9044 9045 return false; 9046 } 9047 9048 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 9049 const SCEV *LHS, const SCEV *RHS, 9050 Value *FoundCondValue, 9051 bool Inverse) { 9052 if (!PendingLoopPredicates.insert(FoundCondValue).second) 9053 return false; 9054 9055 auto ClearOnExit = 9056 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 9057 9058 // Recursively handle And and Or conditions. 
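  // (Editor's note: the recursion below rests on two facts -- a taken branch
  // on "%c = and i1 %a, %b" guarantees %a and %b individually, and a
  // *not*-taken branch (Inverse set) on "%c = or i1 %a, %b" guarantees !%a
  // and !%b individually -- so each operand can be tried on its own.)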
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch.  Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        LLVM_FALLTHROUGH;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).
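  //
  // A sketch of the intended behavior, on illustrative inputs:
  //   computeConstantDifference((%x + 13), %x)       == 13
  //   computeConstantDifference((%x + 13), (%x + 5)) == None -- the
  //     syntactic matching below deliberately does not chase this case.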
9248 9249 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9250 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9251 const auto *MAR = cast<SCEVAddRecExpr>(More); 9252 9253 if (LAR->getLoop() != MAR->getLoop()) 9254 return None; 9255 9256 // We look at affine expressions only; not for correctness but to keep 9257 // getStepRecurrence cheap. 9258 if (!LAR->isAffine() || !MAR->isAffine()) 9259 return None; 9260 9261 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9262 return None; 9263 9264 Less = LAR->getStart(); 9265 More = MAR->getStart(); 9266 9267 // fall through 9268 } 9269 9270 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9271 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9272 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9273 return M - L; 9274 } 9275 9276 const SCEV *L, *R; 9277 SCEV::NoWrapFlags Flags; 9278 if (splitBinaryAdd(Less, L, R, Flags)) 9279 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 9280 if (R == More) 9281 return -(LC->getAPInt()); 9282 9283 if (splitBinaryAdd(More, L, R, Flags)) 9284 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 9285 if (R == Less) 9286 return LC->getAPInt(); 9287 9288 return None; 9289 } 9290 9291 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9292 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9293 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9294 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9295 return false; 9296 9297 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9298 if (!AddRecLHS) 9299 return false; 9300 9301 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9302 if (!AddRecFoundLHS) 9303 return false; 9304 9305 // We'd like to let SCEV reason about control dependencies, so we constrain 9306 // both the inequalities to be about add recurrences on the same loop. This 9307 // way we can use isLoopEntryGuardedByCond later. 9308 9309 const Loop *L = AddRecFoundLHS->getLoop(); 9310 if (L != AddRecLHS->getLoop()) 9311 return false; 9312 9313 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9314 // 9315 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9316 // ... (2) 9317 // 9318 // Informal proof for (2), assuming (1) [*]: 9319 // 9320 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9321 // 9322 // Then 9323 // 9324 // FoundLHS s< FoundRHS s< INT_MIN - C 9325 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9326 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9327 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9328 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9329 // <=> FoundLHS + C s< FoundRHS + C 9330 // 9331 // [*]: (1) can be proved by ruling out overflow. 9332 // 9333 // [**]: This can be proved by analyzing all the four possibilities: 9334 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9335 // (A s>= 0, B s>= 0). 9336 // 9337 // Note: 9338 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9339 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9340 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9341 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9342 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9343 // C)". 
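  //
  // A concrete instance of (1), purely illustrative, in i8: take C = 30, so
  // -C = 226 (unsigned). If FoundLHS = 10 u< FoundRHS = 100 u< 226, then
  // neither sum wraps, and indeed (10 + 30) u< (100 + 30).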
9344 9345 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9346 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9347 if (!LDiff || !RDiff || *LDiff != *RDiff) 9348 return false; 9349 9350 if (LDiff->isMinValue()) 9351 return true; 9352 9353 APInt FoundRHSLimit; 9354 9355 if (Pred == CmpInst::ICMP_ULT) { 9356 FoundRHSLimit = -(*RDiff); 9357 } else { 9358 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9359 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9360 } 9361 9362 // Try to prove (1) or (2), as needed. 9363 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 9364 getConstant(FoundRHSLimit)); 9365 } 9366 9367 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 9368 const SCEV *LHS, const SCEV *RHS, 9369 const SCEV *FoundLHS, 9370 const SCEV *FoundRHS) { 9371 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9372 return true; 9373 9374 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9375 return true; 9376 9377 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 9378 FoundLHS, FoundRHS) || 9379 // ~x < ~y --> x > y 9380 isImpliedCondOperandsHelper(Pred, LHS, RHS, 9381 getNotSCEV(FoundRHS), 9382 getNotSCEV(FoundLHS)); 9383 } 9384 9385 /// If Expr computes ~A, return A else return nullptr 9386 static const SCEV *MatchNotExpr(const SCEV *Expr) { 9387 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 9388 if (!Add || Add->getNumOperands() != 2 || 9389 !Add->getOperand(0)->isAllOnesValue()) 9390 return nullptr; 9391 9392 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 9393 if (!AddRHS || AddRHS->getNumOperands() != 2 || 9394 !AddRHS->getOperand(0)->isAllOnesValue()) 9395 return nullptr; 9396 9397 return AddRHS->getOperand(1); 9398 } 9399 9400 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 9401 template<typename MaxExprType> 9402 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 9403 const SCEV *Candidate) { 9404 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 9405 if (!MaxExpr) return false; 9406 9407 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 9408 } 9409 9410 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 9411 template<typename MaxExprType> 9412 static bool IsMinConsistingOf(ScalarEvolution &SE, 9413 const SCEV *MaybeMinExpr, 9414 const SCEV *Candidate) { 9415 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 9416 if (!MaybeMaxExpr) 9417 return false; 9418 9419 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 9420 } 9421 9422 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 9423 ICmpInst::Predicate Pred, 9424 const SCEV *LHS, const SCEV *RHS) { 9425 // If both sides are affine addrecs for the same loop, with equal 9426 // steps, and we know the recurrences don't wrap, then we only 9427 // need to check the predicate on the starting values. 9428 9429 if (!ICmpInst::isRelational(Pred)) 9430 return false; 9431 9432 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 9433 if (!LAR) 9434 return false; 9435 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 9436 if (!RAR) 9437 return false; 9438 if (LAR->getLoop() != RAR->getLoop()) 9439 return false; 9440 if (!LAR->isAffine() || !RAR->isAffine()) 9441 return false; 9442 9443 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 9444 return false; 9445 9446 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 
      SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which would
      // be cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
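  // For instance (illustrative, i8): with Pred = ICMP_SLT and
  // ConstFoundRHS = 10, makeAllowedICmpRegion gives
  // FoundLHSRange = [-128, 10); an Addend of 5 then shifts this to
  // LHSRange = [-123, 15) below.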
9702 ConstantRange FoundLHSRange = 9703 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9704 9705 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9706 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9707 9708 // We can also compute the range of values for `LHS` that satisfy the 9709 // consequent, "`LHS` `Pred` `RHS`": 9710 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9711 ConstantRange SatisfyingLHSRange = 9712 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9713 9714 // The antecedent implies the consequent if every value of `LHS` that 9715 // satisfies the antecedent also satisfies the consequent. 9716 return SatisfyingLHSRange.contains(LHSRange); 9717 } 9718 9719 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9720 bool IsSigned, bool NoWrap) { 9721 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9722 9723 if (NoWrap) return false; 9724 9725 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9726 const SCEV *One = getOne(Stride->getType()); 9727 9728 if (IsSigned) { 9729 APInt MaxRHS = getSignedRangeMax(RHS); 9730 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9731 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9732 9733 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9734 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9735 } 9736 9737 APInt MaxRHS = getUnsignedRangeMax(RHS); 9738 APInt MaxValue = APInt::getMaxValue(BitWidth); 9739 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9740 9741 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9742 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9743 } 9744 9745 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9746 bool IsSigned, bool NoWrap) { 9747 if (NoWrap) return false; 9748 9749 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9750 const SCEV *One = getOne(Stride->getType()); 9751 9752 if (IsSigned) { 9753 APInt MinRHS = getSignedRangeMin(RHS); 9754 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9755 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9756 9757 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9758 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9759 } 9760 9761 APInt MinRHS = getUnsignedRangeMin(RHS); 9762 APInt MinValue = APInt::getMinValue(BitWidth); 9763 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9764 9765 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9766 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9767 } 9768 9769 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9770 bool Equality) { 9771 const SCEV *One = getOne(Step->getType()); 9772 Delta = Equality ? getAddExpr(Delta, Step) 9773 : getAddExpr(Delta, getMinusSCEV(Step, One)); 9774 return getUDivExpr(Delta, Step); 9775 } 9776 9777 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 9778 const SCEV *Stride, 9779 const SCEV *End, 9780 unsigned BitWidth, 9781 bool IsSigned) { 9782 9783 assert(!isKnownNonPositive(Stride) && 9784 "Stride is expected strictly positive!"); 9785 // Calculate the maximum backedge count based on the range of values 9786 // permitted by Start, End, and Stride. 9787 const SCEV *MaxBECount; 9788 APInt MinStart = 9789 IsSigned ? 
      getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression, we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove the
    // correctness of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
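    //
    // As an illustration (values are ours, not from the source): with
    // start = 0, end = 10, stride = 3 the formula gives
    // (max(10, 3) - 0 - 1) /u 3 = 9 /u 3 = 3 backedges (i = 0, 3, 6, 9),
    // and with start = 0, end = 5, stride = 7 (a single-trip loop) it gives
    // (max(5, 7) - 0 - 1) /u 7 = 6 /u 7 = 0 backedges.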
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing optimization in the presence
    // of undefined behavior, as in the C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we
  // can calculate the MaxBECount, given the start, stride and max value for
  // the end bound of the loop (RHS), and the fact that IV does not overflow
  // (which is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where
  // Start is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so
  // the result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ?
              getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // undefined behavior, as in the C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression, we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ?
APIntOps::smax(getSignedRangeMin(RHS), Limit) 10022 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10023 10024 10025 const SCEV *MaxBECount = getCouldNotCompute(); 10026 if (isa<SCEVConstant>(BECount)) 10027 MaxBECount = BECount; 10028 else 10029 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 10030 getConstant(MinStride), false); 10031 10032 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10033 MaxBECount = BECount; 10034 10035 return ExitLimit(BECount, MaxBECount, false, Predicates); 10036 } 10037 10038 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10039 ScalarEvolution &SE) const { 10040 if (Range.isFullSet()) // Infinite loop. 10041 return SE.getCouldNotCompute(); 10042 10043 // If the start is a non-zero constant, shift the range to simplify things. 10044 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10045 if (!SC->getValue()->isZero()) { 10046 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10047 Operands[0] = SE.getZero(SC->getType()); 10048 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10049 getNoWrapFlags(FlagNW)); 10050 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10051 return ShiftedAddRec->getNumIterationsInRange( 10052 Range.subtract(SC->getAPInt()), SE); 10053 // This is strange and shouldn't happen. 10054 return SE.getCouldNotCompute(); 10055 } 10056 10057 // The only time we can solve this is when we have all constant indices. 10058 // Otherwise, we cannot determine the overflow conditions. 10059 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10060 return SE.getCouldNotCompute(); 10061 10062 // Okay at this point we know that all elements of the chrec are constants and 10063 // that the start element is zero. 10064 10065 // First check to see if the range contains zero. If not, the first 10066 // iteration exits. 10067 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10068 if (!Range.contains(APInt(BitWidth, 0))) 10069 return SE.getZero(getType()); 10070 10071 if (isAffine()) { 10072 // If this is an affine expression then we have this situation: 10073 // Solve {0,+,A} in Range === Ax in Range 10074 10075 // We know that zero is in the range. If A is positive then we know that 10076 // the upper value of the range must be the first possible exit value. 10077 // If A is negative then the lower of the range is the last possible loop 10078 // value. Also note that we already checked for a full range. 10079 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10080 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10081 10082 // The exit value should be (End+A)/A. 10083 APInt ExitVal = (End + A).udiv(A); 10084 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10085 10086 // Evaluate at the exit value. If we really did fall out of the valid 10087 // range, then we computed our trip count, otherwise wrap around or other 10088 // things must have happened. 10089 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10090 if (Range.contains(Val->getValue())) 10091 return SE.getCouldNotCompute(); // Something strange happened 10092 10093 // Ensure that the previous value is in the range. This is a sanity check. 
    assert(Range.contains(
               EvaluateConstantChrecAtConstant(this,
                   ConstantInt::get(SE.getContext(), ExitVal - 1), SE)
                   ->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it. To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap);

    // Next, solve the constructed addrec
    if (auto Roots =
            SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2); // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration should
        // not be in the range, but the previous one should be. When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val =
            EvaluateConstantChrecAtConstant(this, R1->getValue(), SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
              ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute(); // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute(); // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRecExpr, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecChecker(ContainsAddRec);
          visitAll(Op, ContainsAddRecChecker);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
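///
/// For example (illustrative): given {%a,+,(8 * %m)}<%loop>, the stride
/// collector records the step (8 * %m), from which the term collector keeps
/// the MulExpr (8 * %m); constant factors such as the 8 are stripped later,
/// in findArrayDimensions.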
10286 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10287 SmallVectorImpl<const SCEV *> &Terms) { 10288 SmallVector<const SCEV *, 4> Strides; 10289 SCEVCollectStrides StrideCollector(*this, Strides); 10290 visitAll(Expr, StrideCollector); 10291 10292 DEBUG({ 10293 dbgs() << "Strides:\n"; 10294 for (const SCEV *S : Strides) 10295 dbgs() << *S << "\n"; 10296 }); 10297 10298 for (const SCEV *S : Strides) { 10299 SCEVCollectTerms TermCollector(Terms); 10300 visitAll(S, TermCollector); 10301 } 10302 10303 DEBUG({ 10304 dbgs() << "Terms:\n"; 10305 for (const SCEV *T : Terms) 10306 dbgs() << *T << "\n"; 10307 }); 10308 10309 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10310 visitAll(Expr, MulCollector); 10311 } 10312 10313 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10314 SmallVectorImpl<const SCEV *> &Terms, 10315 SmallVectorImpl<const SCEV *> &Sizes) { 10316 int Last = Terms.size() - 1; 10317 const SCEV *Step = Terms[Last]; 10318 10319 // End of recursion. 10320 if (Last == 0) { 10321 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10322 SmallVector<const SCEV *, 2> Qs; 10323 for (const SCEV *Op : M->operands()) 10324 if (!isa<SCEVConstant>(Op)) 10325 Qs.push_back(Op); 10326 10327 Step = SE.getMulExpr(Qs); 10328 } 10329 10330 Sizes.push_back(Step); 10331 return true; 10332 } 10333 10334 for (const SCEV *&Term : Terms) { 10335 // Normalize the terms before the next call to findArrayDimensionsRec. 10336 const SCEV *Q, *R; 10337 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10338 10339 // Bail out when GCD does not evenly divide one of the terms. 10340 if (!R->isZero()) 10341 return false; 10342 10343 Term = Q; 10344 } 10345 10346 // Remove all SCEVConstants. 10347 Terms.erase( 10348 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10349 Terms.end()); 10350 10351 if (Terms.size() > 0) 10352 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10353 return false; 10354 10355 Sizes.push_back(Step); 10356 return true; 10357 } 10358 10359 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10360 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 10361 for (const SCEV *T : Terms) 10362 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 10363 return true; 10364 return false; 10365 } 10366 10367 // Return the number of product terms in S. 10368 static inline int numberOfTerms(const SCEV *S) { 10369 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 10370 return Expr->getNumOperands(); 10371 return 1; 10372 } 10373 10374 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 10375 if (isa<SCEVConstant>(T)) 10376 return nullptr; 10377 10378 if (isa<SCEVUnknown>(T)) 10379 return T; 10380 10381 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 10382 SmallVector<const SCEV *, 2> Factors; 10383 for (const SCEV *Op : M->operands()) 10384 if (!isa<SCEVConstant>(Op)) 10385 Factors.push_back(Op); 10386 10387 return SE.getMulExpr(Factors); 10388 } 10389 10390 return T; 10391 } 10392 10393 /// Return the size of an element read or written by Inst. 
10394 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 10395 Type *Ty; 10396 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 10397 Ty = Store->getValueOperand()->getType(); 10398 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 10399 Ty = Load->getType(); 10400 else 10401 return nullptr; 10402 10403 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 10404 return getSizeOfExpr(ETy, Ty); 10405 } 10406 10407 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 10408 SmallVectorImpl<const SCEV *> &Sizes, 10409 const SCEV *ElementSize) { 10410 if (Terms.size() < 1 || !ElementSize) 10411 return; 10412 10413 // Early return when Terms do not contain parameters: we do not delinearize 10414 // non parametric SCEVs. 10415 if (!containsParameters(Terms)) 10416 return; 10417 10418 DEBUG({ 10419 dbgs() << "Terms:\n"; 10420 for (const SCEV *T : Terms) 10421 dbgs() << *T << "\n"; 10422 }); 10423 10424 // Remove duplicates. 10425 array_pod_sort(Terms.begin(), Terms.end()); 10426 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 10427 10428 // Put larger terms first. 10429 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 10430 return numberOfTerms(LHS) > numberOfTerms(RHS); 10431 }); 10432 10433 // Try to divide all terms by the element size. If term is not divisible by 10434 // element size, proceed with the original term. 10435 for (const SCEV *&Term : Terms) { 10436 const SCEV *Q, *R; 10437 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 10438 if (!Q->isZero()) 10439 Term = Q; 10440 } 10441 10442 SmallVector<const SCEV *, 4> NewTerms; 10443 10444 // Remove constant factors. 10445 for (const SCEV *T : Terms) 10446 if (const SCEV *NewT = removeConstantFactors(*this, T)) 10447 NewTerms.push_back(NewT); 10448 10449 DEBUG({ 10450 dbgs() << "Terms after sorting:\n"; 10451 for (const SCEV *T : NewTerms) 10452 dbgs() << *T << "\n"; 10453 }); 10454 10455 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 10456 Sizes.clear(); 10457 return; 10458 } 10459 10460 // The last element to be pushed into Sizes is the size of an element. 10461 Sizes.push_back(ElementSize); 10462 10463 DEBUG({ 10464 dbgs() << "Sizes:\n"; 10465 for (const SCEV *S : Sizes) 10466 dbgs() << *S << "\n"; 10467 }); 10468 } 10469 10470 void ScalarEvolution::computeAccessFunctions( 10471 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 10472 SmallVectorImpl<const SCEV *> &Sizes) { 10473 // Early exit in case this SCEV is not an affine multivariate function. 10474 if (Sizes.empty()) 10475 return; 10476 10477 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 10478 if (!AR->isAffine()) 10479 return; 10480 10481 const SCEV *Res = Expr; 10482 int Last = Sizes.size() - 1; 10483 for (int i = Last; i >= 0; i--) { 10484 const SCEV *Q, *R; 10485 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 10486 10487 DEBUG({ 10488 dbgs() << "Res: " << *Res << "\n"; 10489 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 10490 dbgs() << "Res divided by Sizes[i]:\n"; 10491 dbgs() << "Quotient: " << *Q << "\n"; 10492 dbgs() << "Remainder: " << *R << "\n"; 10493 }); 10494 10495 Res = Q; 10496 10497 // Do not record the last subscript corresponding to the size of elements in 10498 // the array. 10499 if (i == Last) { 10500 10501 // Bail out if the remainder is too complex. 
10502 if (isa<SCEVAddRecExpr>(R)) { 10503 Subscripts.clear(); 10504 Sizes.clear(); 10505 return; 10506 } 10507 10508 continue; 10509 } 10510 10511 // Record the access function for the current subscript. 10512 Subscripts.push_back(R); 10513 } 10514 10515 // Also push in last position the remainder of the last division: it will be 10516 // the access function of the innermost dimension. 10517 Subscripts.push_back(Res); 10518 10519 std::reverse(Subscripts.begin(), Subscripts.end()); 10520 10521 DEBUG({ 10522 dbgs() << "Subscripts:\n"; 10523 for (const SCEV *S : Subscripts) 10524 dbgs() << *S << "\n"; 10525 }); 10526 } 10527 10528 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 10529 /// sizes of an array access. Returns the remainder of the delinearization that 10530 /// is the offset start of the array. The SCEV->delinearize algorithm computes 10531 /// the multiples of SCEV coefficients: that is a pattern matching of sub 10532 /// expressions in the stride and base of a SCEV corresponding to the 10533 /// computation of a GCD (greatest common divisor) of base and stride. When 10534 /// SCEV->delinearize fails, it returns the SCEV unchanged. 10535 /// 10536 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 10537 /// 10538 /// void foo(long n, long m, long o, double A[n][m][o]) { 10539 /// 10540 /// for (long i = 0; i < n; i++) 10541 /// for (long j = 0; j < m; j++) 10542 /// for (long k = 0; k < o; k++) 10543 /// A[i][j][k] = 1.0; 10544 /// } 10545 /// 10546 /// the delinearization input is the following AddRec SCEV: 10547 /// 10548 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 10549 /// 10550 /// From this SCEV, we are able to say that the base offset of the access is %A 10551 /// because it appears as an offset that does not divide any of the strides in 10552 /// the loops: 10553 /// 10554 /// CHECK: Base offset: %A 10555 /// 10556 /// and then SCEV->delinearize determines the size of some of the dimensions of 10557 /// the array as these are the multiples by which the strides are happening: 10558 /// 10559 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 10560 /// 10561 /// Note that the outermost dimension remains of UnknownSize because there are 10562 /// no strides that would help identifying the size of the last dimension: when 10563 /// the array has been statically allocated, one could compute the size of that 10564 /// dimension by dividing the overall size of the array by the size of the known 10565 /// dimensions: %m * %o * 8. 10566 /// 10567 /// Finally delinearize provides the access functions for the array reference 10568 /// that does correspond to A[i][j][k] of the above C testcase: 10569 /// 10570 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 10571 /// 10572 /// The testcases are checking the output of a function pass: 10573 /// DelinearizationPass that walks through all loads and stores of a function 10574 /// asking for the SCEV of the memory access with respect to all enclosing 10575 /// loops, calling SCEV->delinearize on that and printing the results. 10576 void ScalarEvolution::delinearize(const SCEV *Expr, 10577 SmallVectorImpl<const SCEV *> &Subscripts, 10578 SmallVectorImpl<const SCEV *> &Sizes, 10579 const SCEV *ElementSize) { 10580 // First step: collect parametric terms. 
10581 SmallVector<const SCEV *, 4> Terms; 10582 collectParametricTerms(Expr, Terms); 10583 10584 if (Terms.empty()) 10585 return; 10586 10587 // Second step: find subscript sizes. 10588 findArrayDimensions(Terms, Sizes, ElementSize); 10589 10590 if (Sizes.empty()) 10591 return; 10592 10593 // Third step: compute the access functions for each subscript. 10594 computeAccessFunctions(Expr, Subscripts, Sizes); 10595 10596 if (Subscripts.empty()) 10597 return; 10598 10599 DEBUG({ 10600 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 10601 dbgs() << "ArrayDecl[UnknownSize]"; 10602 for (const SCEV *S : Sizes) 10603 dbgs() << "[" << *S << "]"; 10604 10605 dbgs() << "\nArrayRef"; 10606 for (const SCEV *S : Subscripts) 10607 dbgs() << "[" << *S << "]"; 10608 dbgs() << "\n"; 10609 }); 10610 } 10611 10612 //===----------------------------------------------------------------------===// 10613 // SCEVCallbackVH Class Implementation 10614 //===----------------------------------------------------------------------===// 10615 10616 void ScalarEvolution::SCEVCallbackVH::deleted() { 10617 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10618 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 10619 SE->ConstantEvolutionLoopExitValue.erase(PN); 10620 SE->eraseValueFromMap(getValPtr()); 10621 // this now dangles! 10622 } 10623 10624 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 10625 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10626 10627 // Forget all the expressions associated with users of the old value, 10628 // so that future queries will recompute the expressions using the new 10629 // value. 10630 Value *Old = getValPtr(); 10631 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 10632 SmallPtrSet<User *, 8> Visited; 10633 while (!Worklist.empty()) { 10634 User *U = Worklist.pop_back_val(); 10635 // Deleting the Old value will cause this to dangle. Postpone 10636 // that until everything else is done. 10637 if (U == Old) 10638 continue; 10639 if (!Visited.insert(U).second) 10640 continue; 10641 if (PHINode *PN = dyn_cast<PHINode>(U)) 10642 SE->ConstantEvolutionLoopExitValue.erase(PN); 10643 SE->eraseValueFromMap(U); 10644 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 10645 } 10646 // Delete the Old value. 10647 if (PHINode *PN = dyn_cast<PHINode>(Old)) 10648 SE->ConstantEvolutionLoopExitValue.erase(PN); 10649 SE->eraseValueFromMap(Old); 10650 // this now dangles! 10651 } 10652 10653 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 10654 : CallbackVH(V), SE(se) {} 10655 10656 //===----------------------------------------------------------------------===// 10657 // ScalarEvolution Class Implementation 10658 //===----------------------------------------------------------------------===// 10659 10660 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 10661 AssumptionCache &AC, DominatorTree &DT, 10662 LoopInfo &LI) 10663 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 10664 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 10665 LoopDispositions(64), BlockDispositions(64) { 10666 // To use guards for proving predicates, we need to scan every instruction in 10667 // relevant basic blocks, and not just terminators. Doing this is a waste of 10668 // time if the IR does not actually contain any calls to 10669 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
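  //
  // Illustrative shape of the per-instruction output (not verbatim):
  //   <instruction>
  //   -->  <SCEV>  U: <unsigned range>  S: <signed range>
  //        Exits: <value at loop exit>  LoopDispositions: { <header>: ... }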
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
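    // (If L's header dominates the header of AR's loop then, given the checks
    // above, AR's loop is nested inside L, so the recurrence takes different
    // values on different iterations of L.)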
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
        LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ?
          LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}
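// getBlockDisposition below follows the same memoization discipline as
// getLoopDisposition above: a conservative placeholder entry is inserted
// before the computation, and the entry is looked up again afterwards,
// since the recursive computation may add entries to the DenseMap and
// invalidate references obtained before the call.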
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
        ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}
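// Record S in LoopUsers for every loop referenced by an add-recurrence inside
// it, so that forgetting one of those loops can also drop the memoized state
// that depends on S (see forgetLoop).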
void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  struct FindUsedLoops {
    SmallPtrSet<const Loop *, 8> LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F;
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);

  for (auto *L : F.LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}
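// verify() is a debugging aid: it rebuilds a fresh ScalarEvolution and
// cross-checks its backedge-taken counts against the cached ones. It runs
// from ScalarEvolutionWrapperPass::verifyAnalysis() below when SCEV
// verification is enabled (the VerifySCEV flag checked there).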
bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }
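  // Note that in both rewrites above the step is sign-extended regardless of
  // the cast being rewritten: SCEVWrapPredicate increments are interpreted as
  // signed quantities (IncrementNUSW rules out unsigned wrap with a signed
  // step, IncrementNSSW signed wrap with a signed step).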
private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
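// convertSCEVToAddRecWithPredicates is the other entry point into the
// rewriter: instead of applying already-known predicates, it collects the
// predicates that would make the expression an AddRec. See
// PredicatedScalarEvolution::getAsAddRec below for a typical client.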
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
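// Example: for an addrec {0,+,1}<nuw><nsw> over some loop, getImpliedFlags
// returns IncrementNSSW | IncrementNUSW (NSW transfers directly, and NUW plus
// the non-negative constant step 1 gives NUSW), so wrap predicates requesting
// either flag on that addrec are statically satisfied.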
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an"
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
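// A minimal usage sketch for the PredicatedScalarEvolution interface defined
// below (the names SE, L and Ptr are illustrative assumptions standing in for
// a client's analysis results and IR values):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEV *BTC = PSE.getBackedgeTakenCount();   // May add predicates.
//   const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr); // nullptr on failure.
//   if (AR && !PSE.getUnionPredicate().isAlwaysTrue()) {
//     // The results only hold under the accumulated predicates; the client
//     // must emit a runtime check for them (e.g. via SCEVExpander) before
//     // relying on AR.
//   }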
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}